Diffstat (limited to 'src/kube2msb/vendor/github.com')
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/LICENSE21
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/README.md352
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/entry.go248
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/exported.go182
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/formatter.go44
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/hooks.go34
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/json_formatter.go26
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/logger.go161
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/logrus.go94
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_darwin.go12
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go20
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_linux.go12
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go21
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_windows.go27
-rw-r--r--src/kube2msb/vendor/github.com/Sirupsen/logrus/text_formatter.go124
-rw-r--r--src/kube2msb/vendor/github.com/beorn7/perks/LICENSE20
-rw-r--r--src/kube2msb/vendor/github.com/beorn7/perks/quantile/exampledata.txt2388
-rw-r--r--src/kube2msb/vendor/github.com/beorn7/perks/quantile/stream.go292
-rw-r--r--src/kube2msb/vendor/github.com/blang/semver/LICENSE22
-rw-r--r--src/kube2msb/vendor/github.com/blang/semver/README.md142
-rw-r--r--src/kube2msb/vendor/github.com/blang/semver/json.go23
-rw-r--r--src/kube2msb/vendor/github.com/blang/semver/semver.go395
-rw-r--r--src/kube2msb/vendor/github.com/blang/semver/sort.go28
-rw-r--r--src/kube2msb/vendor/github.com/blang/semver/sql.go30
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/LICENSE202
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/NOTICE5
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/http/client.go7
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/http/http.go159
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/http/middleware.go14
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/http/url.go29
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/jose/claims.go126
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jose.go112
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwk.go135
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jws.go51
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwt.go82
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig.go24
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go67
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go67
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/key/key.go153
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/key/manager.go99
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/key/repo.go55
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/key/rotate.go165
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/key/sync.go91
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/error.go29
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go416
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/client.go846
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/identity.go44
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/interface.go3
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/key.go67
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/provider.go688
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/transport.go88
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/util.go109
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/verification.go188
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-systemd/LICENSE191
-rw-r--r--src/kube2msb/vendor/github.com/coreos/go-systemd/journal/journal.go179
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/LICENSE202
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/NOTICE5
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/capnslog/README.md39
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/capnslog/formatters.go157
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go96
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init.go49
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init_windows.go25
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go68
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/capnslog/log_hijack.go39
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/capnslog/logmap.go240
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go171
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go65
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/health/README.md11
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/health/health.go127
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/httputil/README.md13
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/httputil/cookie.go21
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/httputil/json.go27
-rw-r--r--src/kube2msb/vendor/github.com/coreos/pkg/timeutil/backoff.go15
-rw-r--r--src/kube2msb/vendor/github.com/davecgh/go-spew/LICENSE13
-rw-r--r--src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypass.go151
-rw-r--r--src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go37
-rw-r--r--src/kube2msb/vendor/github.com/davecgh/go-spew/spew/common.go341
-rw-r--r--src/kube2msb/vendor/github.com/davecgh/go-spew/spew/config.go297
-rw-r--r--src/kube2msb/vendor/github.com/davecgh/go-spew/spew/doc.go202
-rw-r--r--src/kube2msb/vendor/github.com/davecgh/go-spew/spew/dump.go509
-rw-r--r--src/kube2msb/vendor/github.com/davecgh/go-spew/spew/format.go419
-rw-r--r--src/kube2msb/vendor/github.com/davecgh/go-spew/spew/spew.go148
-rw-r--r--src/kube2msb/vendor/github.com/docker/distribution/LICENSE202
-rw-r--r--src/kube2msb/vendor/github.com/docker/distribution/digest/digest.go139
-rw-r--r--src/kube2msb/vendor/github.com/docker/distribution/digest/digester.go155
-rw-r--r--src/kube2msb/vendor/github.com/docker/distribution/digest/doc.go42
-rw-r--r--src/kube2msb/vendor/github.com/docker/distribution/digest/set.go245
-rw-r--r--src/kube2msb/vendor/github.com/docker/distribution/digest/verifiers.go44
-rw-r--r--src/kube2msb/vendor/github.com/docker/distribution/reference/reference.go334
-rw-r--r--src/kube2msb/vendor/github.com/docker/distribution/reference/regexp.go124
-rw-r--r--src/kube2msb/vendor/github.com/docker/go-units/CONTRIBUTING.md67
-rw-r--r--src/kube2msb/vendor/github.com/docker/go-units/LICENSE.code191
-rw-r--r--src/kube2msb/vendor/github.com/docker/go-units/LICENSE.docs425
-rw-r--r--src/kube2msb/vendor/github.com/docker/go-units/MAINTAINERS27
-rw-r--r--src/kube2msb/vendor/github.com/docker/go-units/README.md18
-rw-r--r--src/kube2msb/vendor/github.com/docker/go-units/circle.yml11
-rw-r--r--src/kube2msb/vendor/github.com/docker/go-units/duration.go33
-rw-r--r--src/kube2msb/vendor/github.com/docker/go-units/size.go95
-rw-r--r--src/kube2msb/vendor/github.com/docker/go-units/ulimit.go118
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/CHANGES.md163
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/LICENSE22
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/README.md74
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/Srcfile1
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/bench_test.sh10
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/compress.go123
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_cache.go103
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_pools.go91
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/compressors.go53
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/constants.go30
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/container.go361
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/cors_filter.go202
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/coverage.sh2
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/curly.go162
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/curly_route.go52
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/doc.go196
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/entity_accessors.go163
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/filter.go26
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/install.sh9
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/jsr311.go248
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/log/log.go31
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/logger.go32
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/mime.go45
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/options_filter.go26
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/parameter.go114
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/path_expression.go69
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/request.go131
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/response.go235
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/route.go183
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/route_builder.go240
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/router.go18
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/service_error.go23
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md43
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/README.md76
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go64
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/config.go38
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_builder.go436
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_list.go86
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go66
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go87
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go36
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger.go184
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go21
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go440
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/web_service.go268
-rw-r--r--src/kube2msb/vendor/github.com/emicklei/go-restful/web_service_container.go39
-rw-r--r--src/kube2msb/vendor/github.com/ghodss/yaml/LICENSE50
-rw-r--r--src/kube2msb/vendor/github.com/ghodss/yaml/README.md116
-rw-r--r--src/kube2msb/vendor/github.com/ghodss/yaml/fields.go497
-rw-r--r--src/kube2msb/vendor/github.com/ghodss/yaml/yaml.go277
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/LICENSE36
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/Makefile43
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/clone.go228
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode.go872
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode_gogo.go175
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode.go1335
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode_gogo.go354
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/equal.go266
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions.go519
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go221
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib.go883
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib_gogo.go40
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/message_set.go280
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go479
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go266
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go108
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties.go915
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties_gogo.go64
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/skip_gogo.go117
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/text.go793
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_gogo.go55
-rw-r--r--src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_parser.go841
-rw-r--r--src/kube2msb/vendor/github.com/golang/glog/LICENSE191
-rw-r--r--src/kube2msb/vendor/github.com/golang/glog/README44
-rw-r--r--src/kube2msb/vendor/github.com/golang/glog/glog.go1177
-rw-r--r--src/kube2msb/vendor/github.com/golang/glog/glog_file.go124
-rw-r--r--src/kube2msb/vendor/github.com/golang/protobuf/LICENSE31
-rw-r--r--src/kube2msb/vendor/github.com/golang/protobuf/proto/Makefile43
-rw-r--r--src/kube2msb/vendor/github.com/golang/protobuf/proto/clone.go223
-rw-r--r--src/kube2msb/vendor/github.com/golang/protobuf/proto/decode.go867
-rw-r--r--src/kube2msb/vendor/github.com/golang/protobuf/proto/encode.go1325
-rw-r--r--src/kube2msb/vendor/github.com/golang/protobuf/proto/equal.go276
-rw-r--r--src/kube2msb/vendor/github.com/golang/protobuf/proto/extensions.go399
-rw-r--r--src/kube2msb/vendor/github.com/golang/protobuf/proto/lib.go894
-rw-r--r--src/kube2msb/vendor/github.com/golang/protobuf/proto/message_set.go280
-rw-r--r--src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_reflect.go479
-rw-r--r--src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go266
-rw-r--r--src/kube2msb/vendor/github.com/golang/protobuf/proto/properties.go842
-rw-r--r--src/kube2msb/vendor/github.com/golang/protobuf/proto/text.go751
-rw-r--r--src/kube2msb/vendor/github.com/golang/protobuf/proto/text_parser.go806
-rw-r--r--src/kube2msb/vendor/github.com/google/cadvisor/LICENSE190
-rw-r--r--src/kube2msb/vendor/github.com/google/cadvisor/info/v1/container.go583
-rw-r--r--src/kube2msb/vendor/github.com/google/cadvisor/info/v1/docker.go37
-rw-r--r--src/kube2msb/vendor/github.com/google/cadvisor/info/v1/machine.go205
-rw-r--r--src/kube2msb/vendor/github.com/google/cadvisor/info/v1/metric.go79
-rw-r--r--src/kube2msb/vendor/github.com/google/gofuzz/CONTRIBUTING.md67
-rw-r--r--src/kube2msb/vendor/github.com/google/gofuzz/LICENSE202
-rw-r--r--src/kube2msb/vendor/github.com/google/gofuzz/README.md71
-rw-r--r--src/kube2msb/vendor/github.com/google/gofuzz/doc.go18
-rw-r--r--src/kube2msb/vendor/github.com/google/gofuzz/fuzz.go446
-rw-r--r--src/kube2msb/vendor/github.com/imdario/mergo/LICENSE28
-rw-r--r--src/kube2msb/vendor/github.com/imdario/mergo/README.md68
-rw-r--r--src/kube2msb/vendor/github.com/imdario/mergo/doc.go44
-rw-r--r--src/kube2msb/vendor/github.com/imdario/mergo/map.go146
-rw-r--r--src/kube2msb/vendor/github.com/imdario/mergo/merge.go99
-rw-r--r--src/kube2msb/vendor/github.com/imdario/mergo/mergo.go90
-rw-r--r--src/kube2msb/vendor/github.com/jonboulle/clockwork/LICENSE201
-rw-r--r--src/kube2msb/vendor/github.com/jonboulle/clockwork/README.md61
-rw-r--r--src/kube2msb/vendor/github.com/jonboulle/clockwork/clockwork.go164
-rw-r--r--src/kube2msb/vendor/github.com/juju/ratelimit/LICENSE191
-rw-r--r--src/kube2msb/vendor/github.com/juju/ratelimit/README.md117
-rw-r--r--src/kube2msb/vendor/github.com/juju/ratelimit/ratelimit.go245
-rw-r--r--src/kube2msb/vendor/github.com/juju/ratelimit/reader.go51
-rw-r--r--src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE201
-rw-r--r--src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go75
-rw-r--r--src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go16
-rw-r--r--src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go46
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/LICENSE191
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/NOTICE17
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go64
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go3
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go402
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go237
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go94
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go121
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go139
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go78
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go61
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go3
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go71
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go291
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go40
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go41
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go41
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go35
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go73
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go78
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go106
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go413
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go61
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go124
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go6
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go6
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go332
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go51
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go57
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go125
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go9
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go14
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go30
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go5
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go31
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go15
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go127
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go8
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go72
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go143
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go27
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go40
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go25
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go25
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go25
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go12
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go15
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go9
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go99
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go121
-rw-r--r--src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go33
-rw-r--r--src/kube2msb/vendor/github.com/pborman/uuid/CONTRIBUTORS1
-rw-r--r--src/kube2msb/vendor/github.com/pborman/uuid/LICENSE27
-rw-r--r--src/kube2msb/vendor/github.com/pborman/uuid/dce.go84
-rw-r--r--src/kube2msb/vendor/github.com/pborman/uuid/doc.go8
-rw-r--r--src/kube2msb/vendor/github.com/pborman/uuid/hash.go53
-rw-r--r--src/kube2msb/vendor/github.com/pborman/uuid/json.go30
-rw-r--r--src/kube2msb/vendor/github.com/pborman/uuid/node.go101
-rw-r--r--src/kube2msb/vendor/github.com/pborman/uuid/time.go132
-rw-r--r--src/kube2msb/vendor/github.com/pborman/uuid/util.go43
-rw-r--r--src/kube2msb/vendor/github.com/pborman/uuid/uuid.go163
-rw-r--r--src/kube2msb/vendor/github.com/pborman/uuid/version1.go41
-rw-r--r--src/kube2msb/vendor/github.com/pborman/uuid/version4.go25
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/LICENSE201
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/NOTICE28
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/README.md53
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/collector.go75
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/counter.go175
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/desc.go201
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/doc.go109
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/expvar.go119
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/gauge.go147
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go50
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/histogram.go450
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/http.go361
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/metric.go166
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go142
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/push.go65
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/registry.go726
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/summary.go540
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/untyped.go145
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/value.go234
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/vec.go247
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_model/LICENSE201
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_model/NOTICE5
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/client_model/go/metrics.pb.go364
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/LICENSE201
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/NOTICE5
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/expfmt/decode.go433
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/expfmt/encode.go88
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/expfmt/expfmt.go40
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/expfmt/fuzz.go36
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/expfmt/json_decode.go174
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_create.go305
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_parse.go753
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt67
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go162
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/model/alert.go136
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/model/fingerprinting.go105
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/model/fnv.go42
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/model/labels.go206
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/model/labelset.go169
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/model/metric.go98
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/model/model.go16
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/model/signature.go144
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/model/silence.go106
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/model/time.go249
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/common/model/value.go403
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/procfs/AUTHORS.md11
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/procfs/CONTRIBUTING.md18
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/procfs/LICENSE201
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/procfs/NOTICE7
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/procfs/README.md7
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/procfs/doc.go45
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/procfs/fs.go36
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/procfs/proc.go149
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/procfs/proc_limits.go111
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/procfs/proc_stat.go175
-rw-r--r--src/kube2msb/vendor/github.com/prometheus/procfs/stat.go55
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/LICENSE28
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/README.md256
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/bool.go97
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/count.go97
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/duration.go86
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/flag.go836
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/float32.go91
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/float64.go87
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/golangflag.go97
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/int.go87
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/int32.go91
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/int64.go87
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/int8.go91
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/int_slice.go128
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/ip.go96
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/ipmask.go122
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/ipnet.go100
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/string.go82
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/string_slice.go111
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/uint.go91
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/uint16.go89
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/uint32.go89
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/uint64.go91
-rw-r--r--src/kube2msb/vendor/github.com/spf13/pflag/uint8.go91
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/LICENSE22
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/0doc.go199
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/README.md148
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/binc.go922
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/cbor.go585
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/decode.go2019
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/encode.go1419
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.generated.go39365
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl540
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.not.go32
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl104
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl58
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.generated.go233
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl364
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/gen.generated.go175
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/gen.go1997
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/helper.go1271
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/helper_internal.go242
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go20
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/helper_unsafe.go45
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/json.go1213
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/msgpack.go845
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/noop.go213
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.go3
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.sh199
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/rpc.go180
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/simple.go519
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json639
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/test.py126
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/tests.sh80
-rw-r--r--src/kube2msb/vendor/github.com/ugorji/go/codec/time.go233
390 files changed, 114319 insertions, 0 deletions
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/LICENSE b/src/kube2msb/vendor/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/README.md b/src/kube2msb/vendor/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 0000000..b6aa84c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,352 @@
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0); the core API is unlikely to change much, but please pin
+your Logrus version to make sure you aren't fetching the latest `master` on every
+build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
+attached, the output is compatible with the
+[l2met](http://r.32k.io/l2met-introduction) format:
+
+```text
+time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10
+time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122
+time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10
+time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9
+time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(&logrus_airbrake.AirbrakeHook{})
+
+ // Output to stderr instead of stdout, could also be a file.
+ log.SetOutput(os.Stderr)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+	// The API for setting attributes is a little different from the package-level
+	// exported logger. See Godoc.
+ log.Out = os.Stderr
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field. However, you can still use the
+`printf`-family functions with Logrus.
+
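+For instance, a `printf`-style call and its field-based equivalent might look
+like this (a schematic sketch; `event` and `topic` are assumed to already be in
+scope):
+
+```go
+// Harder to parse and aggregate later:
+log.Warnf("Failed to send event %s to topic %s", event, topic)
+
+// The same information as structured fields:
+log.WithFields(log.Fields{
+	"event": event,
+	"topic": topic,
+}).Warn("Failed to send event")
+```
+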
+#### Hooks
+
+You can add hooks for logging levels. For example, you can send errors to an
+exception tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD,
+or log to multiple places simultaneously, e.g. to syslog.
+
+```go
+// Not the real implementation of the Airbrake hook. Just a simple sample.
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ log.AddHook(new(AirbrakeHook))
+}
+
+type AirbrakeHook struct{}
+
+// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains
+// the fields for the entry. See the Fields section of the README.
+func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
+ err := airbrake.Notify(entry.Data["error"].(error))
+ if err != nil {
+ log.WithFields(log.Fields{
+ "source": "airbrake",
+ "endpoint": airbrake.Endpoint,
+ }).Info("Failed to send error to Airbrake")
+ }
+
+ return nil
+}
+
+// `Levels()` returns a slice of `Levels` the hook is fired for.
+func (hook *AirbrakeHook) Levels() []log.Level {
+ return []log.Level{
+ log.ErrorLevel,
+ log.FatalLevel,
+ log.PanicLevel,
+ }
+}
+```
+
+Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+ "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+ log.AddHook(new(logrus_airbrake.AirbrakeHook))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+
+* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go)
+ Send errors to an exception tracking service compatible with the Airbrake API.
+ Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.
+
+* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
+ Send errors to the Papertrail hosted logging service via UDP.
+
+* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
+ Send errors to remote syslog server.
+ Uses standard library `log/syslog` behind the scenes.
+
+* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
+ Send errors to a channel in hipchat.
+
+* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly)
+ Send logs to Loggly (https://www.loggly.com/)
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
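+A minimal sketch of that pattern (the `APP_DEBUG` environment variable is only
+an illustration, not part of Logrus, and `os` is assumed to be imported):
+
+```go
+if os.Getenv("APP_DEBUG") != "" {
+	log.SetLevel(log.DebugLevel)
+}
+```
+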
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
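+For example, a single call with one user field produces an entry like this with
+the default `TextFormatter` (the timestamp is illustrative):
+
+```go
+log.WithField("animal", "walrus").Info("A walrus appears")
+// time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A walrus appears" animal="walrus"
+```
+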
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment, you
+could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force no colored output even if there is a TTY, set the
+    `DisableColors` field to `true`.
+* `logrus.JSONFormatter`. Logs fields as JSON.
+
+Third party logging formatters:
+
+* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+ // the Entry. Consult `godoc` on information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+
+[godoc]: https://godoc.org/github.com/Sirupsen/logrus
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/entry.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000..e164eec
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,248 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns a reader for the entry, which is a proxy to the formatter.
+func (entry *Entry) Reader() (*bytes.Buffer, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ return bytes.NewBuffer(serialized), err
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ reader, err := entry.Reader()
+ if err != nil {
+ return "", err
+ }
+
+ return reader.String(), err
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := Fields{}
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+func (entry *Entry) log(level Level, msg string) {
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ reader, err := entry.Reader()
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+
+ _, err = io.Copy(entry.Logger.Out, reader)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/exported.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000..d087124
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,182 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/formatter.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..038ce9f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,44 @@
+package logrus
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error, etc.
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to avoid silently overwriting the `time`, `msg` and `level` fields when
+// dumping the entry. If this code weren't there, doing:
+//
+// logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code in
+// place, it'll be logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ _, ok := data["time"]
+ if ok {
+ data["fields.time"] = data["time"]
+ }
+
+ _, ok = data["msg"]
+ if ok {
+ data["fields.msg"] = data["msg"]
+ }
+
+ _, ok = data["level"]
+ if ok {
+ data["fields.level"] = data["level"]
+ }
+}
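A minimal sketch of what the Formatter doc comment above describes: a custom formatter only has to implement `Format(*Entry) ([]byte, error)`. The `plainFormatter` name below is hypothetical and purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/Sirupsen/logrus"
)

// plainFormatter is a hypothetical Formatter that renders "LEVEL: message".
type plainFormatter struct{}

func (f *plainFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	return []byte(fmt.Sprintf("%s: %s\n", entry.Level.String(), entry.Message)), nil
}

func main() {
	log := logrus.New()
	log.Formatter = &plainFormatter{} // swap in the custom formatter
	log.Info("custom formatter in use")
}
```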
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/hooks.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..0da2b36
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A Hook is fired when logging at any of the levels returned by `Levels()`
+// on your implementation of the interface. Note that hooks are not fired in a
+// goroutine or through a channel of workers; if you don't want logging calls
+// at those levels to block, you must handle that yourself.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type levelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks levelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks levelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
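A minimal sketch of registering a hook, following the `log.Hooks.Add(new(MyHook))` pattern from the comment above. The `errCounterHook` type is hypothetical; it only exists to show the two methods the Hook interface requires.

```go
package main

import "github.com/Sirupsen/logrus"

// errCounterHook is a hypothetical hook that counts Error-and-above entries.
type errCounterHook struct{ count int }

func (h *errCounterHook) Levels() []logrus.Level {
	return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
}

func (h *errCounterHook) Fire(entry *logrus.Entry) error {
	h.count++ // a real hook might ship the entry to an error tracker here
	return nil
}

func main() {
	log := logrus.New()
	log.Hooks.Add(&errCounterHook{})
	log.Error("this entry also fires the hook")
}
```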
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/json_formatter.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..b09227c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,26 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+type JSONFormatter struct{}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ prefixFieldClashes(data)
+ data["time"] = entry.Time.Format(time.RFC3339)
+ data["msg"] = entry.Message
+ data["level"] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
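A short sketch of switching a logger to the JSONFormatter above; the field values are illustrative. As the Format method shows, each entry becomes one JSON object with `time`, `level` and `msg` keys, and a user-supplied `msg` field would be renamed `fields.msg` by prefixFieldClashes.

```go
package main

import "github.com/Sirupsen/logrus"

func main() {
	log := logrus.New()
	log.Formatter = &logrus.JSONFormatter{}

	// Emits one JSON object per entry; the clashing "msg" field below is
	// preserved as "fields.msg" rather than overwriting the log message.
	log.WithFields(logrus.Fields{
		"service": "kube2msb",
		"msg":     "user supplied",
	}).Info("registered service")
}
```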
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/logger.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 0000000..b392e54
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,161 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it default which is `os.Stdout`. You can also set this to
+ // something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks levelHooks
+ // All log entries pass through the formatter before being logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter`, of which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but not when writing to a file. You can easily implement
+ // your own formatter by satisfying the `Formatter` interface; see the `README`
+ // or the included formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.InfoLevel`, which allows Info(), Warn(), Error() and Fatal() to
+ // be logged. `logrus.DebugLevel` is useful when tracking down problems in
+ // development.
+ Level Level
+ // Used to sync writing to the log.
+ mu sync.Mutex
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the returned logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(levelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stdout,
+ Formatter: new(TextFormatter),
+ Hooks: make(levelHooks),
+ Level: InfoLevel,
+ }
+}
+
+// Adds a field to the log entry. Note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic; it only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ return NewEntry(logger).WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ return NewEntry(logger).WithFields(fields)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ NewEntry(logger).Debugf(format, args...)
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ NewEntry(logger).Infof(format, args...)
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ NewEntry(logger).Printf(format, args...)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ NewEntry(logger).Warnf(format, args...)
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ NewEntry(logger).Warnf(format, args...)
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ NewEntry(logger).Errorf(format, args...)
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ NewEntry(logger).Fatalf(format, args...)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ NewEntry(logger).Panicf(format, args...)
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ NewEntry(logger).Debug(args...)
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ NewEntry(logger).Warn(args...)
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ NewEntry(logger).Warn(args...)
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ NewEntry(logger).Error(args...)
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ NewEntry(logger).Fatal(args...)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ NewEntry(logger).Panic(args...)
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ NewEntry(logger).Debugln(args...)
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ NewEntry(logger).Infoln(args...)
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ NewEntry(logger).Println(args...)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ NewEntry(logger).Warnln(args...)
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ NewEntry(logger).Warnln(args...)
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ NewEntry(logger).Errorln(args...)
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ NewEntry(logger).Fatalln(args...)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ NewEntry(logger).Panicln(args...)
+}
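A minimal sketch of configuring a Logger instance through the exported fields documented above (`Out`, `Level`) and then logging through the instance methods; the specific destinations and messages are illustrative.

```go
package main

import (
	"os"

	"github.com/Sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.Out = os.Stderr           // redirect away from the default os.Stdout
	log.Level = logrus.DebugLevel // let Debug() entries through

	log.Debugf("pid=%d starting up", os.Getpid())
	log.WithField("attempt", 3).Warn("retrying")
}
```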
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/logrus.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..43ee12e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,94 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch lvl {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var _ StdLogger = &log.Logger{}
+
+// StdLogger is what your logrus-enabled library should take, that way
+// it'll accept either a stdlib logger or a logrus logger. There's no standard
+// interface, this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
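A small sketch of the Level helpers defined above: ParseLevel maps a string to a Level constant (accepting both "warn" and "warning"), and String converts back; the inputs are illustrative.

```go
package main

import (
	"fmt"

	"github.com/Sirupsen/logrus"
)

func main() {
	lvl, err := logrus.ParseLevel("warn") // "warn" and "warning" both map to WarnLevel
	if err != nil {
		panic(err)
	}
	fmt.Println(lvl.String()) // prints "warning"

	if _, err := logrus.ParseLevel("noise"); err != nil {
		fmt.Println(err) // not a valid logrus Level: "noise"
	}
}
```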
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_darwin.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_darwin.go
new file mode 100644
index 0000000..8fe02a4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_darwin.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go
new file mode 100644
index 0000000..0428ee5
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go
@@ -0,0 +1,20 @@
+/*
+ Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
+*/
+package logrus
+
+import (
+ "syscall"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000..a2c0b40
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000..276447b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,21 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux,!appengine darwin freebsd
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000..2e09f6f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/text_formatter.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..78e7889
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,124 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+)
+
+var (
+ baseTimestamp time.Time
+ isTerminal bool
+ noQuoteNeeded *regexp.Regexp
+)
+
+func init() {
+ baseTimestamp = time.Now()
+ isTerminal = IsTerminal()
+}
+
+func miniTS() int {
+ return int(time.Since(baseTimestamp) / time.Second)
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+ DisableColors bool
+ // Set to true to disable timestamp logging (useful when the output
+ // is redirected to a logging system already adding a timestamp)
+ DisableTimestamp bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+
+ var keys []string
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ b := &bytes.Buffer{}
+
+ prefixFieldClashes(entry.Data)
+
+ isColored := (f.ForceColors || isTerminal) && !f.DisableColors
+
+ if isColored {
+ printColored(b, entry, keys)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ f.appendKeyValue(b, "msg", entry.Message)
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
+ var levelColor int
+ switch entry.Level {
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
+ }
+}
+
+// needsQuoting reports whether the value can be written without quotes: it
+// returns true only when every rune is alphanumeric, '-' or '.'. A false
+// result makes appendKeyValue fall back to the quoted %q form.
+func needsQuoting(text string) bool {
+	for _, ch := range text {
+		if !((ch >= 'a' && ch <= 'z') ||
+			(ch >= 'A' && ch <= 'Z') ||
+			(ch >= '0' && ch <= '9') ||
+			ch == '-' || ch == '.') {
+			return false
+		}
+	}
+	return true
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
+ switch value.(type) {
+ case string:
+ if needsQuoting(value.(string)) {
+ fmt.Fprintf(b, "%v=%s ", key, value)
+ } else {
+ fmt.Fprintf(b, "%v=%q ", key, value)
+ }
+ case error:
+ if needsQuoting(value.(error).Error()) {
+ fmt.Fprintf(b, "%v=%s ", key, value)
+ } else {
+ fmt.Fprintf(b, "%v=%q ", key, value)
+ }
+ default:
+ fmt.Fprintf(b, "%v=%v ", key, value)
+ }
+}
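A short sketch of the TextFormatter knobs defined above (ForceColors, DisableColors, DisableTimestamp); the scenario in the comment is an assumption used for illustration.

```go
package main

import "github.com/Sirupsen/logrus"

func main() {
	log := logrus.New()
	// Plain key=value output with no colors and no "time" key; useful when the
	// log collector already timestamps each line.
	log.Formatter = &logrus.TextFormatter{
		DisableColors:    true,
		DisableTimestamp: true,
	}
	log.WithField("path", "/healthz").Info("probe ok")
}
```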
diff --git a/src/kube2msb/vendor/github.com/beorn7/perks/LICENSE b/src/kube2msb/vendor/github.com/beorn7/perks/LICENSE
new file mode 100644
index 0000000..339177b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/beorn7/perks/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/src/kube2msb/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/src/kube2msb/vendor/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 0000000..1602287
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/src/kube2msb/vendor/github.com/beorn7/perks/quantile/stream.go b/src/kube2msb/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 0000000..587b1fc
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,292 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targets map[float64]float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for quantile, epsilon := range targets {
+ if quantile*s.n <= r {
+ f = (2 * epsilon * r) / quantile
+ } else {
+ f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed value for the qth quantile. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(float64(l) * q)
+ if i > 0 {
+ i -= 1
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unittests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+ xi -= 1
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
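A minimal usage sketch of the quantile API defined above: NewTargeted with a quantile-to-error map, Insert for observations, and Query for results. The target quantiles, error bounds and random data are illustrative only.

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th quantile with the given absolute errors.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.99: 0.001,
	})

	for i := 0; i < 10000; i++ {
		q.Insert(rand.Float64() * 100) // observed values, e.g. latencies in ms
	}

	fmt.Printf("count=%d p50=%.2f p99=%.2f\n", q.Count(), q.Query(0.50), q.Query(0.99))
}
```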
diff --git a/src/kube2msb/vendor/github.com/blang/semver/LICENSE b/src/kube2msb/vendor/github.com/blang/semver/LICENSE
new file mode 100644
index 0000000..5ba5c86
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/blang/semver/LICENSE
@@ -0,0 +1,22 @@
+The MIT License
+
+Copyright (c) 2014 Benedikt Lang <github at benediktlang.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
diff --git a/src/kube2msb/vendor/github.com/blang/semver/README.md b/src/kube2msb/vendor/github.com/blang/semver/README.md
new file mode 100644
index 0000000..5171c5c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/blang/semver/README.md
@@ -0,0 +1,142 @@
+semver for golang [![Build Status](https://drone.io/github.com/blang/semver/status.png)](https://drone.io/github.com/blang/semver/latest) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
+======
+
+semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
+
+Usage
+-----
+```bash
+$ go get github.com/blang/semver
+```
+Note: Always vendor your dependencies or pin to a specific version tag.
+
+```go
+import "github.com/blang/semver"
+v1, err := semver.Make("1.0.0-beta")
+v2, err := semver.Make("2.0.0-beta")
+v1.Compare(v2)
+```
+
+Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
+
+Why should I use this lib?
+-----
+
+- Fully spec compatible
+- No reflection
+- No regex
+- Fully tested (Coverage >99%)
+- Readable parsing/validation errors
+- Fast (See [Benchmarks](#benchmarks))
+- Only Stdlib
+- Uses values instead of pointers
+- Many features, see below
+
+
+Features
+-----
+
+- Parsing and validation at all levels
+- Comparator-like comparisons
+- Compare Helper Methods
+- InPlace manipulation
+- Sortable (implements sort.Interface)
+- database/sql compatible (sql.Scanner/Valuer)
+- encoding/json compatible (json.Marshaler/Unmarshaler)
+
+
+Example
+-----
+
+Have a look at full examples in [examples/main.go](examples/main.go)
+
+```go
+import "github.com/blang/semver"
+
+v, err := semver.Make("0.0.1-alpha.preview+123.github")
+fmt.Printf("Major: %d\n", v.Major)
+fmt.Printf("Minor: %d\n", v.Minor)
+fmt.Printf("Patch: %d\n", v.Patch)
+fmt.Printf("Pre: %s\n", v.Pre)
+fmt.Printf("Build: %s\n", v.Build)
+
+// Prerelease versions array
+if len(v.Pre) > 0 {
+ fmt.Println("Prerelease versions:")
+ for i, pre := range v.Pre {
+ fmt.Printf("%d: %q\n", i, pre)
+ }
+}
+
+// Build meta data array
+if len(v.Build) > 0 {
+ fmt.Println("Build meta data:")
+ for i, build := range v.Build {
+ fmt.Printf("%d: %q\n", i, build)
+ }
+}
+
+v001, err := semver.Make("0.0.1")
+// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
+v001.GT(v) == true
+v.LT(v001) == true
+v.GTE(v) == true
+v.LTE(v) == true
+
+// Or use v.Compare(v2) for comparisons (-1, 0, 1):
+v001.Compare(v) == 1
+v.Compare(v001) == -1
+v.Compare(v) == 0
+
+// Manipulate Version in place:
+v.Pre[0], err = semver.NewPRVersion("beta")
+if err != nil {
+ fmt.Printf("Error parsing pre release version: %q", err)
+}
+
+fmt.Println("\nValidate versions:")
+v.Build[0] = "?"
+
+err = v.Validate()
+if err != nil {
+ fmt.Printf("Validation failed: %s\n", err)
+}
+```
+
+Benchmarks
+-----
+
+ BenchmarkParseSimple 5000000 328 ns/op 49 B/op 1 allocs/op
+ BenchmarkParseComplex 1000000 2105 ns/op 263 B/op 7 allocs/op
+ BenchmarkParseAverage 1000000 1301 ns/op 168 B/op 4 allocs/op
+ BenchmarkStringSimple 10000000 130 ns/op 5 B/op 1 allocs/op
+ BenchmarkStringLarger 5000000 280 ns/op 32 B/op 2 allocs/op
+ BenchmarkStringComplex 3000000 512 ns/op 80 B/op 3 allocs/op
+ BenchmarkStringAverage 5000000 387 ns/op 47 B/op 2 allocs/op
+ BenchmarkValidateSimple 500000000 7.92 ns/op 0 B/op 0 allocs/op
+ BenchmarkValidateComplex 2000000 923 ns/op 0 B/op 0 allocs/op
+ BenchmarkValidateAverage 5000000 452 ns/op 0 B/op 0 allocs/op
+ BenchmarkCompareSimple 100000000 11.2 ns/op 0 B/op 0 allocs/op
+ BenchmarkCompareComplex 50000000 40.9 ns/op 0 B/op 0 allocs/op
+ BenchmarkCompareAverage 50000000 43.8 ns/op 0 B/op 0 allocs/op
+ BenchmarkSort 5000000 436 ns/op 259 B/op 2 allocs/op
+
+See benchmark cases at [semver_test.go](semver_test.go)
+
+
+Motivation
+-----
+
+I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.
+
+
+Contribution
+-----
+
+Feel free to make a pull request. For bigger changes, create an issue first to discuss it.
+
+
+License
+-----
+
+See [LICENSE](LICENSE) file.
diff --git a/src/kube2msb/vendor/github.com/blang/semver/json.go b/src/kube2msb/vendor/github.com/blang/semver/json.go
new file mode 100644
index 0000000..a74bf7c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/blang/semver/json.go
@@ -0,0 +1,23 @@
+package semver
+
+import (
+ "encoding/json"
+)
+
+// MarshalJSON implements the encoding/json.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(data []byte) (err error) {
+ var versionString string
+
+ if err = json.Unmarshal(data, &versionString); err != nil {
+ return
+ }
+
+ *v, err = Parse(versionString)
+
+ return
+}
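A small round-trip sketch of the MarshalJSON/UnmarshalJSON methods above: a Version marshals to a plain JSON string and unmarshals back through Parse. The version string is illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/blang/semver"
)

func main() {
	v := semver.MustParse("1.2.3-beta.1+build.42")

	data, _ := json.Marshal(v) // Version marshals as a plain JSON string
	fmt.Println(string(data))  // "1.2.3-beta.1+build.42"

	var back semver.Version
	if err := json.Unmarshal(data, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Equals(v)) // true
}
```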
diff --git a/src/kube2msb/vendor/github.com/blang/semver/semver.go b/src/kube2msb/vendor/github.com/blang/semver/semver.go
new file mode 100644
index 0000000..bbf85ce
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/blang/semver/semver.go
@@ -0,0 +1,395 @@
+package semver
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+const (
+ numbers string = "0123456789"
+ alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
+ alphanum = alphas + numbers
+)
+
+// SpecVersion is the latest fully supported spec version of semver
+var SpecVersion = Version{
+ Major: 2,
+ Minor: 0,
+ Patch: 0,
+}
+
+// Version represents a semver compatible version
+type Version struct {
+ Major uint64
+ Minor uint64
+ Patch uint64
+ Pre []PRVersion
+ Build []string // No precedence
+}
+
+// Version to string
+func (v Version) String() string {
+ b := make([]byte, 0, 5)
+ b = strconv.AppendUint(b, v.Major, 10)
+ b = append(b, '.')
+ b = strconv.AppendUint(b, v.Minor, 10)
+ b = append(b, '.')
+ b = strconv.AppendUint(b, v.Patch, 10)
+
+ if len(v.Pre) > 0 {
+ b = append(b, '-')
+ b = append(b, v.Pre[0].String()...)
+
+ for _, pre := range v.Pre[1:] {
+ b = append(b, '.')
+ b = append(b, pre.String()...)
+ }
+ }
+
+ if len(v.Build) > 0 {
+ b = append(b, '+')
+ b = append(b, v.Build[0]...)
+
+ for _, build := range v.Build[1:] {
+ b = append(b, '.')
+ b = append(b, build...)
+ }
+ }
+
+ return string(b)
+}
+
+// Equals checks if v is equal to o.
+func (v Version) Equals(o Version) bool {
+ return (v.Compare(o) == 0)
+}
+
+// EQ checks if v is equal to o.
+func (v Version) EQ(o Version) bool {
+ return (v.Compare(o) == 0)
+}
+
+// NE checks if v is not equal to o.
+func (v Version) NE(o Version) bool {
+ return (v.Compare(o) != 0)
+}
+
+// GT checks if v is greater than o.
+func (v Version) GT(o Version) bool {
+ return (v.Compare(o) == 1)
+}
+
+// GTE checks if v is greater than or equal to o.
+func (v Version) GTE(o Version) bool {
+ return (v.Compare(o) >= 0)
+}
+
+// GE checks if v is greater than or equal to o.
+func (v Version) GE(o Version) bool {
+ return (v.Compare(o) >= 0)
+}
+
+// LT checks if v is less than o.
+func (v Version) LT(o Version) bool {
+ return (v.Compare(o) == -1)
+}
+
+// LTE checks if v is less than or equal to o.
+func (v Version) LTE(o Version) bool {
+ return (v.Compare(o) <= 0)
+}
+
+// LE checks if v is less than or equal to o.
+func (v Version) LE(o Version) bool {
+ return (v.Compare(o) <= 0)
+}
+
+// Compare compares Versions v to o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v Version) Compare(o Version) int {
+ if v.Major != o.Major {
+ if v.Major > o.Major {
+ return 1
+ }
+ return -1
+ }
+ if v.Minor != o.Minor {
+ if v.Minor > o.Minor {
+ return 1
+ }
+ return -1
+ }
+ if v.Patch != o.Patch {
+ if v.Patch > o.Patch {
+ return 1
+ }
+ return -1
+ }
+
+ // Quick comparison if a version has no prerelease versions
+ if len(v.Pre) == 0 && len(o.Pre) == 0 {
+ return 0
+ } else if len(v.Pre) == 0 && len(o.Pre) > 0 {
+ return 1
+ } else if len(v.Pre) > 0 && len(o.Pre) == 0 {
+ return -1
+ }
+
+ i := 0
+ for ; i < len(v.Pre) && i < len(o.Pre); i++ {
+ if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
+ continue
+ } else if comp == 1 {
+ return 1
+ } else {
+ return -1
+ }
+ }
+
+ // If all shared prerelease identifiers are equal, the version with additional identifiers is greater
+ if i == len(v.Pre) && i == len(o.Pre) {
+ return 0
+ } else if i == len(v.Pre) && i < len(o.Pre) {
+ return -1
+ } else {
+ return 1
+ }
+
+}
+
+// Validate validates v and returns an error if any part of the version is invalid.
+func (v Version) Validate() error {
+ // Major, Minor, Patch already validated using uint64
+
+ for _, pre := range v.Pre {
+ if !pre.IsNum { //Numeric prerelease versions already uint64
+ if len(pre.VersionStr) == 0 {
+ return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
+ }
+ if !containsOnly(pre.VersionStr, alphanum) {
+ return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
+ }
+ }
+ }
+
+ for _, build := range v.Build {
+ if len(build) == 0 {
+ return fmt.Errorf("Build meta data can not be empty %q", build)
+ }
+ if !containsOnly(build, alphanum) {
+ return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
+ }
+ }
+
+ return nil
+}
+
+// New is like Parse but returns a pointer to the parsed and validated Version, or an error.
+func New(s string) (vp *Version, err error) {
+ v, err := Parse(s)
+ vp = &v
+ return
+}
+
+// Make is an alias for Parse; it parses a version string and returns a validated Version or an error.
+func Make(s string) (Version, error) {
+ return Parse(s)
+}
+
+// Parse parses version string and returns a validated Version or error
+func Parse(s string) (Version, error) {
+ if len(s) == 0 {
+ return Version{}, errors.New("Version string empty")
+ }
+
+ // Split into major.minor.(patch+pr+meta)
+ parts := strings.SplitN(s, ".", 3)
+ if len(parts) != 3 {
+ return Version{}, errors.New("No Major.Minor.Patch elements found")
+ }
+
+ // Major
+ if !containsOnly(parts[0], numbers) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
+ }
+ if hasLeadingZeroes(parts[0]) {
+ return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
+ }
+ major, err := strconv.ParseUint(parts[0], 10, 64)
+ if err != nil {
+ return Version{}, err
+ }
+
+ // Minor
+ if !containsOnly(parts[1], numbers) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
+ }
+ if hasLeadingZeroes(parts[1]) {
+ return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
+ }
+ minor, err := strconv.ParseUint(parts[1], 10, 64)
+ if err != nil {
+ return Version{}, err
+ }
+
+ v := Version{}
+ v.Major = major
+ v.Minor = minor
+
+ var build, prerelease []string
+ patchStr := parts[2]
+
+ if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
+ build = strings.Split(patchStr[buildIndex+1:], ".")
+ patchStr = patchStr[:buildIndex]
+ }
+
+ if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
+ prerelease = strings.Split(patchStr[preIndex+1:], ".")
+ patchStr = patchStr[:preIndex]
+ }
+
+ if !containsOnly(patchStr, numbers) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
+ }
+ if hasLeadingZeroes(patchStr) {
+ return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
+ }
+ patch, err := strconv.ParseUint(patchStr, 10, 64)
+ if err != nil {
+ return Version{}, err
+ }
+
+ v.Patch = patch
+
+ // Prerelease
+ for _, prstr := range prerelease {
+ parsedPR, err := NewPRVersion(prstr)
+ if err != nil {
+ return Version{}, err
+ }
+ v.Pre = append(v.Pre, parsedPR)
+ }
+
+ // Build meta data
+ for _, str := range build {
+ if len(str) == 0 {
+ return Version{}, errors.New("Build meta data is empty")
+ }
+ if !containsOnly(str, alphanum) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
+ }
+ v.Build = append(v.Build, str)
+ }
+
+ return v, nil
+}
+
+// MustParse is like Parse but panics if the version cannot be parsed.
+func MustParse(s string) Version {
+ v, err := Parse(s)
+ if err != nil {
+ panic(`semver: Parse(` + s + `): ` + err.Error())
+ }
+ return v
+}
+
+// PRVersion represents a PreRelease Version
+type PRVersion struct {
+ VersionStr string
+ VersionNum uint64
+ IsNum bool
+}
+
+// NewPRVersion creates a new valid prerelease version
+func NewPRVersion(s string) (PRVersion, error) {
+ if len(s) == 0 {
+ return PRVersion{}, errors.New("Prerelease is empty")
+ }
+ v := PRVersion{}
+ if containsOnly(s, numbers) {
+ if hasLeadingZeroes(s) {
+ return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
+ }
+ num, err := strconv.ParseUint(s, 10, 64)
+
+ // Might never be hit, but just in case
+ if err != nil {
+ return PRVersion{}, err
+ }
+ v.VersionNum = num
+ v.IsNum = true
+ } else if containsOnly(s, alphanum) {
+ v.VersionStr = s
+ v.IsNum = false
+ } else {
+ return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
+ }
+ return v, nil
+}
+
+// IsNumeric checks if the prerelease version is numeric
+func (v PRVersion) IsNumeric() bool {
+ return v.IsNum
+}
+
+// Compare compares two PreRelease Versions v and o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v PRVersion) Compare(o PRVersion) int {
+ if v.IsNum && !o.IsNum {
+ return -1
+ } else if !v.IsNum && o.IsNum {
+ return 1
+ } else if v.IsNum && o.IsNum {
+ if v.VersionNum == o.VersionNum {
+ return 0
+ } else if v.VersionNum > o.VersionNum {
+ return 1
+ } else {
+ return -1
+ }
+ } else { // both are Alphas
+ if v.VersionStr == o.VersionStr {
+ return 0
+ } else if v.VersionStr > o.VersionStr {
+ return 1
+ } else {
+ return -1
+ }
+ }
+}
+
+// PreRelease version to string
+func (v PRVersion) String() string {
+ if v.IsNum {
+ return strconv.FormatUint(v.VersionNum, 10)
+ }
+ return v.VersionStr
+}
+
+func containsOnly(s string, set string) bool {
+ return strings.IndexFunc(s, func(r rune) bool {
+ return !strings.ContainsRune(set, r)
+ }) == -1
+}
+
+func hasLeadingZeroes(s string) bool {
+ return len(s) > 1 && s[0] == '0'
+}
+
+// NewBuildVersion creates a new valid build version
+func NewBuildVersion(s string) (string, error) {
+ if len(s) == 0 {
+ return "", errors.New("Buildversion is empty")
+ }
+ if !containsOnly(s, alphanum) {
+ return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
+ }
+ return s, nil
+}
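A short sketch of Parse, MustParse and comparison as defined above; the version strings are illustrative, and the leading-zero example shows one of the validation errors returned by Parse.

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	a, err := semver.Parse("1.4.0")
	if err != nil {
		panic(err)
	}
	b := semver.MustParse("1.4.0-rc.2") // panics on invalid input

	// A release ranks above its prereleases.
	fmt.Println(a.GT(b), a.Compare(b)) // true 1

	if _, err := semver.Parse("01.2.3"); err != nil {
		fmt.Println(err) // leading zeroes are rejected
	}
}
```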
diff --git a/src/kube2msb/vendor/github.com/blang/semver/sort.go b/src/kube2msb/vendor/github.com/blang/semver/sort.go
new file mode 100644
index 0000000..e18f880
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/blang/semver/sort.go
@@ -0,0 +1,28 @@
+package semver
+
+import (
+ "sort"
+)
+
+// Versions represents multiple versions.
+type Versions []Version
+
+// Len returns length of version collection
+func (s Versions) Len() int {
+ return len(s)
+}
+
+// Swap swaps two versions inside the collection by its indices
+func (s Versions) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Less checks if version at index i is less than version at index j
+func (s Versions) Less(i, j int) bool {
+ return s[i].LT(s[j])
+}
+
+// Sort sorts a slice of versions
+func Sort(versions []Version) {
+ sort.Sort(Versions(versions))
+}
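A minimal sketch of the Sort helper above, which orders versions ascending using the semver precedence rules (so a prerelease sorts before its matching release); the versions are illustrative.

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	versions := []semver.Version{
		semver.MustParse("1.1.0"),
		semver.MustParse("1.0.0"),
		semver.MustParse("1.1.0-beta"),
	}

	semver.Sort(versions)

	for _, v := range versions {
		fmt.Println(v.String()) // 1.0.0, 1.1.0-beta, 1.1.0
	}
}
```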
diff --git a/src/kube2msb/vendor/github.com/blang/semver/sql.go b/src/kube2msb/vendor/github.com/blang/semver/sql.go
new file mode 100644
index 0000000..eb4d802
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/blang/semver/sql.go
@@ -0,0 +1,30 @@
+package semver
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// Scan implements the database/sql.Scanner interface.
+func (v *Version) Scan(src interface{}) (err error) {
+ var str string
+ switch src := src.(type) {
+ case string:
+ str = src
+ case []byte:
+ str = string(src)
+ default:
+ return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
+ }
+
+	// Assign to the named return so a parse failure is reported to the caller
+	// instead of being swallowed by a shadowed err.
+	var t Version
+	if t, err = Parse(str); err == nil {
+		*v = t
+	}
+
+	return
+}
+
+// Value implements the database/sql/driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+ return v.String(), nil
+}
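A small sketch of the Scanner/Valuer pair above without a real database: calling Scan directly with a raw column value (as database/sql would) and Value for the write path; the inputs are illustrative.

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// database/sql passes the raw column value to Scan; simulate that here.
	var v semver.Version
	if err := v.Scan([]byte("2.1.7")); err != nil {
		panic(err)
	}
	fmt.Println(v.Minor) // 1

	// Value is used when writing the version back to the database.
	val, _ := v.Value()
	fmt.Println(val) // 2.1.7
}
```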
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/LICENSE b/src/kube2msb/vendor/github.com/coreos/go-oidc/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/NOTICE b/src/kube2msb/vendor/github.com/coreos/go-oidc/NOTICE
new file mode 100644
index 0000000..b39ddfa
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/http/client.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/client.go
new file mode 100644
index 0000000..fd079b4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/client.go
@@ -0,0 +1,7 @@
+package http
+
+import "net/http"
+
+type Client interface {
+ Do(*http.Request) (*http.Response, error)
+}
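
The Client interface above has a single Do method, so the standard library's *http.Client satisfies it directly. A minimal sketch (the discovery URL below is a placeholder, not a real endpoint):

package main

import (
	"fmt"
	"net/http"
	"time"

	phttp "github.com/coreos/go-oidc/http"
)

func main() {
	// Any configured *http.Client can be passed wherever phttp.Client is expected.
	var hc phttp.Client = &http.Client{Timeout: 10 * time.Second}

	req, err := http.NewRequest("GET", "https://provider.example.com/.well-known/openid-configuration", nil)
	if err != nil {
		panic(err)
	}
	resp, err := hc.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
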
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/http/http.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/http.go
new file mode 100644
index 0000000..f0d051b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/http.go
@@ -0,0 +1,159 @@
+package http
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "net/http"
+ "net/url"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/coreos/pkg/capnslog"
+)
+
+var (
+ log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "http")
+)
+
+func WriteError(w http.ResponseWriter, code int, msg string) {
+ e := struct {
+ Error string `json:"error"`
+ }{
+ Error: msg,
+ }
+ b, err := json.Marshal(e)
+ if err != nil {
+ log.Errorf("Failed marshaling %#v to JSON: %v", e, err)
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(code)
+ w.Write(b)
+}
+
+// BasicAuth parses a username and password from the request's
+// Authorization header. This was pulled from golang master:
+// https://codereview.appspot.com/76540043
+func BasicAuth(r *http.Request) (username, password string, ok bool) {
+ auth := r.Header.Get("Authorization")
+ if auth == "" {
+ return
+ }
+
+ if !strings.HasPrefix(auth, "Basic ") {
+ return
+ }
+ c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic "))
+ if err != nil {
+ return
+ }
+ cs := string(c)
+ s := strings.IndexByte(cs, ':')
+ if s < 0 {
+ return
+ }
+ return cs[:s], cs[s+1:], true
+}
+
+func cacheControlMaxAge(hdr string) (time.Duration, bool, error) {
+ for _, field := range strings.Split(hdr, ",") {
+ parts := strings.SplitN(strings.TrimSpace(field), "=", 2)
+ k := strings.ToLower(strings.TrimSpace(parts[0]))
+ if k != "max-age" {
+ continue
+ }
+
+ if len(parts) == 1 {
+ return 0, false, errors.New("max-age has no value")
+ }
+
+ v := strings.TrimSpace(parts[1])
+ if v == "" {
+ return 0, false, errors.New("max-age has empty value")
+ }
+
+ age, err := strconv.Atoi(v)
+ if err != nil {
+ return 0, false, err
+ }
+
+ if age <= 0 {
+ return 0, false, nil
+ }
+
+ return time.Duration(age) * time.Second, true, nil
+ }
+
+ return 0, false, nil
+}
+
+func expires(date, expires string) (time.Duration, bool, error) {
+ if date == "" || expires == "" {
+ return 0, false, nil
+ }
+
+ te, err := time.Parse(time.RFC1123, expires)
+ if err != nil {
+ return 0, false, err
+ }
+
+ td, err := time.Parse(time.RFC1123, date)
+ if err != nil {
+ return 0, false, err
+ }
+
+ ttl := te.Sub(td)
+
+ // headers indicate data already expired, caller should not
+ // have to care about this case
+ if ttl <= 0 {
+ return 0, false, nil
+ }
+
+ return ttl, true, nil
+}
+
+func Cacheable(hdr http.Header) (time.Duration, bool, error) {
+ ttl, ok, err := cacheControlMaxAge(hdr.Get("Cache-Control"))
+ if err != nil || ok {
+ return ttl, ok, err
+ }
+
+ return expires(hdr.Get("Date"), hdr.Get("Expires"))
+}
+
+// MergeQuery appends additional query values to an existing URL.
+func MergeQuery(u url.URL, q url.Values) url.URL {
+ uv := u.Query()
+ for k, vs := range q {
+ for _, v := range vs {
+ uv.Add(k, v)
+ }
+ }
+ u.RawQuery = uv.Encode()
+ return u
+}
+
+// NewResourceLocation appends a resource id to the end of the requested URL path.
+func NewResourceLocation(reqURL *url.URL, id string) string {
+ var u url.URL
+ u = *reqURL
+ u.Path = path.Join(u.Path, id)
+ u.RawQuery = ""
+ u.Fragment = ""
+ return u.String()
+}
+
+// CopyRequest returns a clone of the provided *http.Request.
+// The returned object is a shallow copy of the struct and a
+// deep copy of its Header field.
+func CopyRequest(r *http.Request) *http.Request {
+ r2 := *r
+ r2.Header = make(http.Header)
+ for k, s := range r.Header {
+ r2.Header[k] = s
+ }
+ return &r2
+}
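
Cacheable and MergeQuery are small helpers used when fetching and caching provider metadata. A sketch of both; the header value and URL here are made up for illustration:

package main

import (
	"fmt"
	"net/http"
	"net/url"

	phttp "github.com/coreos/go-oidc/http"
)

func main() {
	// Derive a cache TTL from response headers, as a remote key set fetcher might.
	hdr := http.Header{}
	hdr.Set("Cache-Control", "public, max-age=3600")
	ttl, ok, err := phttp.Cacheable(hdr)
	fmt.Println(ttl, ok, err) // 1h0m0s true <nil>

	// Append query parameters to an existing URL without clobbering what is already there.
	u, _ := url.Parse("https://provider.example.com/authorize?client_id=abc")
	merged := phttp.MergeQuery(*u, url.Values{"state": {"xyz"}})
	fmt.Println(merged.String())
}
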
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/http/middleware.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/middleware.go
new file mode 100644
index 0000000..270b3bc
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/middleware.go
@@ -0,0 +1,14 @@
+package http
+
+import (
+ "net/http"
+)
+
+type LoggingMiddleware struct {
+ Next http.Handler
+}
+
+func (l *LoggingMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ log.Infof("HTTP %s %v", r.Method, r.URL)
+ l.Next.ServeHTTP(w, r)
+}
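
LoggingMiddleware logs the method and URL of every request before delegating to the wrapped handler. Wiring it around a mux might look like this sketch (address and route are placeholders):

package main

import (
	"net/http"

	phttp "github.com/coreos/go-oidc/http"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Wrap the mux so every request is logged before being served.
	handler := &phttp.LoggingMiddleware{Next: mux}
	if err := http.ListenAndServe("127.0.0.1:8080", handler); err != nil {
		panic(err)
	}
}
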
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/http/url.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/url.go
new file mode 100644
index 0000000..df60eb1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/url.go
@@ -0,0 +1,29 @@
+package http
+
+import (
+ "errors"
+ "net/url"
+)
+
+// ParseNonEmptyURL checks that a string is a parsable URL which is also not empty
+// since `url.Parse("")` does not return an error. Must contian a scheme and a host.
+func ParseNonEmptyURL(u string) (*url.URL, error) {
+ if u == "" {
+ return nil, errors.New("url is empty")
+ }
+
+ ur, err := url.Parse(u)
+ if err != nil {
+ return nil, err
+ }
+
+ if ur.Scheme == "" {
+ return nil, errors.New("url scheme is empty")
+ }
+
+ if ur.Host == "" {
+ return nil, errors.New("url host is empty")
+ }
+
+ return ur, nil
+}
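
ParseNonEmptyURL rejects the empty string as well as URLs missing a scheme or host. A short sketch of the three cases (inputs are illustrative):

package main

import (
	"fmt"

	phttp "github.com/coreos/go-oidc/http"
)

func main() {
	for _, raw := range []string{"https://issuer.example.com", "", "issuer.example.com"} {
		u, err := phttp.ParseNonEmptyURL(raw)
		if err != nil {
			fmt.Printf("%q rejected: %v\n", raw, err)
			continue
		}
		fmt.Printf("%q accepted: scheme=%s host=%s\n", raw, u.Scheme, u.Host)
	}
}
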
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/claims.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/claims.go
new file mode 100644
index 0000000..8b48bfd
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/claims.go
@@ -0,0 +1,126 @@
+package jose
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "time"
+)
+
+type Claims map[string]interface{}
+
+func (c Claims) Add(name string, value interface{}) {
+ c[name] = value
+}
+
+func (c Claims) StringClaim(name string) (string, bool, error) {
+ cl, ok := c[name]
+ if !ok {
+ return "", false, nil
+ }
+
+ v, ok := cl.(string)
+ if !ok {
+ return "", false, fmt.Errorf("unable to parse claim as string: %v", name)
+ }
+
+ return v, true, nil
+}
+
+func (c Claims) StringsClaim(name string) ([]string, bool, error) {
+ cl, ok := c[name]
+ if !ok {
+ return nil, false, nil
+ }
+
+ if v, ok := cl.([]string); ok {
+ return v, true, nil
+ }
+
+ // When unmarshaled, []string will become []interface{}.
+ if v, ok := cl.([]interface{}); ok {
+ var ret []string
+ for _, vv := range v {
+ str, ok := vv.(string)
+ if !ok {
+ return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name)
+ }
+ ret = append(ret, str)
+ }
+ return ret, true, nil
+ }
+
+ return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name)
+}
+
+func (c Claims) Int64Claim(name string) (int64, bool, error) {
+ cl, ok := c[name]
+ if !ok {
+ return 0, false, nil
+ }
+
+ v, ok := cl.(int64)
+ if !ok {
+ vf, ok := cl.(float64)
+ if !ok {
+ return 0, false, fmt.Errorf("unable to parse claim as int64: %v", name)
+ }
+ v = int64(vf)
+ }
+
+ return v, true, nil
+}
+
+func (c Claims) Float64Claim(name string) (float64, bool, error) {
+ cl, ok := c[name]
+ if !ok {
+ return 0, false, nil
+ }
+
+ v, ok := cl.(float64)
+ if !ok {
+ vi, ok := cl.(int64)
+ if !ok {
+ return 0, false, fmt.Errorf("unable to parse claim as float64: %v", name)
+ }
+ v = float64(vi)
+ }
+
+ return v, true, nil
+}
+
+func (c Claims) TimeClaim(name string) (time.Time, bool, error) {
+ v, ok, err := c.Float64Claim(name)
+ if !ok || err != nil {
+ return time.Time{}, ok, err
+ }
+
+ s := math.Trunc(v)
+ ns := (v - s) * math.Pow(10, 9)
+ return time.Unix(int64(s), int64(ns)).UTC(), true, nil
+}
+
+func decodeClaims(payload []byte) (Claims, error) {
+ var c Claims
+ if err := json.Unmarshal(payload, &c); err != nil {
+ return nil, fmt.Errorf("malformed JWT claims, unable to decode: %v", err)
+ }
+ return c, nil
+}
+
+func marshalClaims(c Claims) ([]byte, error) {
+ b, err := json.Marshal(c)
+ if err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+func encodeClaims(c Claims) (string, error) {
+ b, err := marshalClaims(c)
+ if err != nil {
+ return "", err
+ }
+
+ return encodeSegment(b), nil
+}
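
Claims is a plain map with typed accessors; numeric claims come back as float64 after JSON decoding, which is the type TimeClaim reads. A sketch with made-up claim values:

package main

import (
	"fmt"
	"time"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	claims := jose.Claims{}
	claims.Add("sub", "user-123")
	// Store exp as float64, the type JSON numbers decode to, so TimeClaim can read it.
	claims.Add("exp", float64(time.Now().Add(time.Hour).Unix()))

	sub, ok, err := claims.StringClaim("sub")
	fmt.Println(sub, ok, err)

	exp, ok, err := claims.TimeClaim("exp")
	fmt.Println(exp, ok, err)
}
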
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jose.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jose.go
new file mode 100644
index 0000000..6209926
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jose.go
@@ -0,0 +1,112 @@
+package jose
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+const (
+ HeaderMediaType = "typ"
+ HeaderKeyAlgorithm = "alg"
+ HeaderKeyID = "kid"
+)
+
+const (
+ // Encryption Algorithm Header Parameter Values for JWS
+ // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-6
+ AlgHS256 = "HS256"
+ AlgHS384 = "HS384"
+ AlgHS512 = "HS512"
+ AlgRS256 = "RS256"
+ AlgRS384 = "RS384"
+ AlgRS512 = "RS512"
+ AlgES256 = "ES256"
+ AlgES384 = "ES384"
+ AlgES512 = "ES512"
+ AlgPS256 = "PS256"
+ AlgPS384 = "PS384"
+ AlgPS512 = "PS512"
+ AlgNone = "none"
+)
+
+const (
+ // Algorithm Header Parameter Values for JWE
+ // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#section-4.1
+ AlgRSA15 = "RSA1_5"
+ AlgRSAOAEP = "RSA-OAEP"
+ AlgRSAOAEP256 = "RSA-OAEP-256"
+ AlgA128KW = "A128KW"
+ AlgA192KW = "A192KW"
+ AlgA256KW = "A256KW"
+ AlgDir = "dir"
+ AlgECDHES = "ECDH-ES"
+ AlgECDHESA128KW = "ECDH-ES+A128KW"
+ AlgECDHESA192KW = "ECDH-ES+A192KW"
+ AlgECDHESA256KW = "ECDH-ES+A256KW"
+ AlgA128GCMKW = "A128GCMKW"
+ AlgA192GCMKW = "A192GCMKW"
+ AlgA256GCMKW = "A256GCMKW"
+ AlgPBES2HS256A128KW = "PBES2-HS256+A128KW"
+ AlgPBES2HS384A192KW = "PBES2-HS384+A192KW"
+ AlgPBES2HS512A256KW = "PBES2-HS512+A256KW"
+)
+
+const (
+ // Encryption Algorithm Header Parameter Values for JWE
+ // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-22
+ EncA128CBCHS256 = "A128CBC-HS256"
+ EncA128CBCHS384 = "A128CBC-HS384"
+ EncA256CBCHS512 = "A256CBC-HS512"
+ EncA128GCM = "A128GCM"
+ EncA192GCM = "A192GCM"
+ EncA256GCM = "A256GCM"
+)
+
+type JOSEHeader map[string]string
+
+func (j JOSEHeader) Validate() error {
+ if _, exists := j[HeaderKeyAlgorithm]; !exists {
+ return fmt.Errorf("header missing %q parameter", HeaderKeyAlgorithm)
+ }
+
+ return nil
+}
+
+func decodeHeader(seg string) (JOSEHeader, error) {
+ b, err := decodeSegment(seg)
+ if err != nil {
+ return nil, err
+ }
+
+ var h JOSEHeader
+ err = json.Unmarshal(b, &h)
+ if err != nil {
+ return nil, err
+ }
+
+ return h, nil
+}
+
+func encodeHeader(h JOSEHeader) (string, error) {
+ b, err := json.Marshal(h)
+ if err != nil {
+ return "", err
+ }
+
+ return encodeSegment(b), nil
+}
+
+// Decode JWT specific base64url encoding with padding stripped
+func decodeSegment(seg string) ([]byte, error) {
+ if l := len(seg) % 4; l != 0 {
+ seg += strings.Repeat("=", 4-l)
+ }
+ return base64.URLEncoding.DecodeString(seg)
+}
+
+// Encode JWT specific base64url encoding with padding stripped
+func encodeSegment(seg []byte) string {
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
+}
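
Validate only checks that the required "alg" parameter is present. A sketch using the exported header constants:

package main

import (
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	h := jose.JOSEHeader{
		jose.HeaderKeyAlgorithm: jose.AlgRS256,
		jose.HeaderKeyID:        "key-1",
	}
	fmt.Println(h.Validate()) // <nil>, the required "alg" parameter is present

	bad := jose.JOSEHeader{jose.HeaderMediaType: "JWT"}
	fmt.Println(bad.Validate()) // error: header missing "alg" parameter
}
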
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwk.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwk.go
new file mode 100644
index 0000000..b7a8e23
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwk.go
@@ -0,0 +1,135 @@
+package jose
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/json"
+ "math/big"
+ "strings"
+)
+
+// JSON Web Key
+// https://tools.ietf.org/html/draft-ietf-jose-json-web-key-36#page-5
+type JWK struct {
+ ID string
+ Type string
+ Alg string
+ Use string
+ Exponent int
+ Modulus *big.Int
+ Secret []byte
+}
+
+type jwkJSON struct {
+ ID string `json:"kid"`
+ Type string `json:"kty"`
+ Alg string `json:"alg"`
+ Use string `json:"use"`
+ Exponent string `json:"e"`
+ Modulus string `json:"n"`
+}
+
+func (j *JWK) MarshalJSON() ([]byte, error) {
+ t := jwkJSON{
+ ID: j.ID,
+ Type: j.Type,
+ Alg: j.Alg,
+ Use: j.Use,
+ Exponent: encodeExponent(j.Exponent),
+ Modulus: encodeModulus(j.Modulus),
+ }
+
+ return json.Marshal(&t)
+}
+
+func (j *JWK) UnmarshalJSON(data []byte) error {
+ var t jwkJSON
+ err := json.Unmarshal(data, &t)
+ if err != nil {
+ return err
+ }
+
+ e, err := decodeExponent(t.Exponent)
+ if err != nil {
+ return err
+ }
+
+ n, err := decodeModulus(t.Modulus)
+ if err != nil {
+ return err
+ }
+
+ j.ID = t.ID
+ j.Type = t.Type
+ j.Alg = t.Alg
+ j.Use = t.Use
+ j.Exponent = e
+ j.Modulus = n
+
+ return nil
+}
+
+type JWKSet struct {
+ Keys []JWK `json:"keys"`
+}
+
+func decodeExponent(e string) (int, error) {
+ decE, err := decodeBase64URLPaddingOptional(e)
+ if err != nil {
+ return 0, err
+ }
+ var eBytes []byte
+ if len(decE) < 8 {
+ eBytes = make([]byte, 8-len(decE), 8)
+ eBytes = append(eBytes, decE...)
+ } else {
+ eBytes = decE
+ }
+ eReader := bytes.NewReader(eBytes)
+ var E uint64
+ err = binary.Read(eReader, binary.BigEndian, &E)
+ if err != nil {
+ return 0, err
+ }
+ return int(E), nil
+}
+
+func encodeExponent(e int) string {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(e))
+ var idx int
+ for ; idx < 8; idx++ {
+ if b[idx] != 0x0 {
+ break
+ }
+ }
+ return base64.URLEncoding.EncodeToString(b[idx:])
+}
+
+// Turns a URL encoded modulus of a key into a big int.
+func decodeModulus(n string) (*big.Int, error) {
+ decN, err := decodeBase64URLPaddingOptional(n)
+ if err != nil {
+ return nil, err
+ }
+ N := big.NewInt(0)
+ N.SetBytes(decN)
+ return N, nil
+}
+
+func encodeModulus(n *big.Int) string {
+ return base64.URLEncoding.EncodeToString(n.Bytes())
+}
+
+// decodeBase64URLPaddingOptional decodes Base64 whether there is padding or not.
+// The stdlib version currently doesn't handle this.
+// We can get rid of this if this bug:
+// https://github.com/golang/go/issues/4237
+// ever closes.
+func decodeBase64URLPaddingOptional(e string) ([]byte, error) {
+ if m := len(e) % 4; m != 0 {
+ e += strings.Repeat("=", 4-m)
+ }
+ return base64.URLEncoding.DecodeString(e)
+}
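
JWK marshals its exponent and modulus as base64url "e" and "n" members and accepts either padded or unpadded input when unmarshaling. A round-trip sketch; the modulus bytes here are a dummy value, not usable RSA key material:

package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	jwk := jose.JWK{
		ID:       "key-1",
		Type:     "RSA",
		Alg:      "RS256",
		Use:      "sig",
		Exponent: 65537,
		Modulus:  big.NewInt(0).SetBytes([]byte("not-a-real-modulus")),
	}

	// MarshalJSON base64url-encodes the exponent and modulus into the "e" and "n" members.
	b, err := json.Marshal(&jwk)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	var parsed jose.JWK
	if err := json.Unmarshal(b, &parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed.ID, parsed.Exponent)
}
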
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jws.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jws.go
new file mode 100644
index 0000000..1049ece
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jws.go
@@ -0,0 +1,51 @@
+package jose
+
+import (
+ "fmt"
+ "strings"
+)
+
+type JWS struct {
+ RawHeader string
+ Header JOSEHeader
+ RawPayload string
+ Payload []byte
+ Signature []byte
+}
+
+// ParseJWS parses a raw encoded JWS token and verifies its structure.
+func ParseJWS(raw string) (JWS, error) {
+ parts := strings.Split(raw, ".")
+ if len(parts) != 3 {
+ return JWS{}, fmt.Errorf("malformed JWS, only %d segments", len(parts))
+ }
+
+ rawSig := parts[2]
+ jws := JWS{
+ RawHeader: parts[0],
+ RawPayload: parts[1],
+ }
+
+ header, err := decodeHeader(jws.RawHeader)
+ if err != nil {
+ return JWS{}, fmt.Errorf("malformed JWS, unable to decode header, %s", err)
+ }
+ if err = header.Validate(); err != nil {
+ return JWS{}, fmt.Errorf("malformed JWS, %s", err)
+ }
+ jws.Header = header
+
+ payload, err := decodeSegment(jws.RawPayload)
+ if err != nil {
+ return JWS{}, fmt.Errorf("malformed JWS, unable to decode payload: %s", err)
+ }
+ jws.Payload = payload
+
+ sig, err := decodeSegment(rawSig)
+ if err != nil {
+ return JWS{}, fmt.Errorf("malformed JWS, unable to decode signature: %s", err)
+ }
+ jws.Signature = sig
+
+ return jws, nil
+}
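
ParseJWS checks structure only, not signatures, so a hand-built unsigned token is enough to exercise it. A sketch; the header and payload contents are made up:

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	// Build a structurally valid, unsigned token by hand; segments use
	// base64url encoding with the padding stripped, as ParseJWS expects.
	enc := func(s string) string { return base64.RawURLEncoding.EncodeToString([]byte(s)) }
	raw := enc(`{"alg":"none"}`) + "." + enc(`{"sub":"user-123"}`) + "." + enc("")

	jws, err := jose.ParseJWS(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(jws.Header[jose.HeaderKeyAlgorithm]) // none
	fmt.Printf("%s\n", jws.Payload)                  // {"sub":"user-123"}
}
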
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwt.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwt.go
new file mode 100644
index 0000000..3b3e963
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwt.go
@@ -0,0 +1,82 @@
+package jose
+
+import "strings"
+
+type JWT JWS
+
+func ParseJWT(token string) (jwt JWT, err error) {
+ jws, err := ParseJWS(token)
+ if err != nil {
+ return
+ }
+
+ return JWT(jws), nil
+}
+
+func NewJWT(header JOSEHeader, claims Claims) (jwt JWT, err error) {
+ jwt = JWT{}
+
+ jwt.Header = header
+ jwt.Header[HeaderMediaType] = "JWT"
+
+ claimBytes, err := marshalClaims(claims)
+ if err != nil {
+ return
+ }
+ jwt.Payload = claimBytes
+
+ eh, err := encodeHeader(header)
+ if err != nil {
+ return
+ }
+ jwt.RawHeader = eh
+
+ ec, err := encodeClaims(claims)
+ if err != nil {
+ return
+ }
+ jwt.RawPayload = ec
+
+ return
+}
+
+func (j *JWT) KeyID() (string, bool) {
+ kID, ok := j.Header[HeaderKeyID]
+ return kID, ok
+}
+
+func (j *JWT) Claims() (Claims, error) {
+ return decodeClaims(j.Payload)
+}
+
+// Encoded data part of the token which may be signed.
+func (j *JWT) Data() string {
+ return strings.Join([]string{j.RawHeader, j.RawPayload}, ".")
+}
+
+// Full encoded JWT token string in format: header.claims.signature
+func (j *JWT) Encode() string {
+ d := j.Data()
+ s := encodeSegment(j.Signature)
+ return strings.Join([]string{d, s}, ".")
+}
+
+func NewSignedJWT(claims Claims, s Signer) (*JWT, error) {
+ header := JOSEHeader{
+ HeaderKeyAlgorithm: s.Alg(),
+ HeaderKeyID: s.ID(),
+ }
+
+ jwt, err := NewJWT(header, claims)
+ if err != nil {
+ return nil, err
+ }
+
+ sig, err := s.Sign([]byte(jwt.Data()))
+ if err != nil {
+ return nil, err
+ }
+ jwt.Signature = sig
+
+ return &jwt, nil
+}
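
NewSignedJWT builds the header from the signer's alg and key ID, signs the encoded header and claims, and the result round-trips through ParseJWT. A sketch using the HMAC signer from sig_hmac.go below; the key ID, secret, and claim values are placeholders:

package main

import (
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	signer := jose.NewSignerHMAC("key-1", []byte("shared-secret"))

	claims := jose.Claims{"iss": "https://issuer.example.com", "sub": "user-123"}
	jwt, err := jose.NewSignedJWT(claims, signer)
	if err != nil {
		panic(err)
	}

	// Encode produces the usual header.claims.signature wire format.
	token := jwt.Encode()
	fmt.Println(token)

	// Round-trip the string and read the claims back.
	parsed, err := jose.ParseJWT(token)
	if err != nil {
		panic(err)
	}
	got, err := parsed.Claims()
	fmt.Println(got, err)
}
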
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig.go
new file mode 100644
index 0000000..7b2b253
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig.go
@@ -0,0 +1,24 @@
+package jose
+
+import (
+ "fmt"
+)
+
+type Verifier interface {
+ ID() string
+ Alg() string
+ Verify(sig []byte, data []byte) error
+}
+
+type Signer interface {
+ Verifier
+ Sign(data []byte) (sig []byte, err error)
+}
+
+func NewVerifier(jwk JWK) (Verifier, error) {
+ if jwk.Type != "RSA" {
+ return nil, fmt.Errorf("unsupported key type %q", jwk.Type)
+ }
+
+ return NewVerifierRSA(jwk)
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go
new file mode 100644
index 0000000..b3ca3ef
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go
@@ -0,0 +1,67 @@
+package jose
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/hmac"
+ _ "crypto/sha256"
+ "errors"
+ "fmt"
+)
+
+type VerifierHMAC struct {
+ KeyID string
+ Hash crypto.Hash
+ Secret []byte
+}
+
+type SignerHMAC struct {
+ VerifierHMAC
+}
+
+func NewVerifierHMAC(jwk JWK) (*VerifierHMAC, error) {
+ if jwk.Alg != "" && jwk.Alg != "HS256" {
+ return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
+ }
+
+ v := VerifierHMAC{
+ KeyID: jwk.ID,
+ Secret: jwk.Secret,
+ Hash: crypto.SHA256,
+ }
+
+ return &v, nil
+}
+
+func (v *VerifierHMAC) ID() string {
+ return v.KeyID
+}
+
+func (v *VerifierHMAC) Alg() string {
+ return "HS256"
+}
+
+func (v *VerifierHMAC) Verify(sig []byte, data []byte) error {
+ h := hmac.New(v.Hash.New, v.Secret)
+ h.Write(data)
+ if !bytes.Equal(sig, h.Sum(nil)) {
+ return errors.New("invalid hmac signature")
+ }
+ return nil
+}
+
+func NewSignerHMAC(kid string, secret []byte) *SignerHMAC {
+ return &SignerHMAC{
+ VerifierHMAC: VerifierHMAC{
+ KeyID: kid,
+ Secret: secret,
+ Hash: crypto.SHA256,
+ },
+ }
+}
+
+func (s *SignerHMAC) Sign(data []byte) ([]byte, error) {
+ h := hmac.New(s.Hash.New, s.Secret)
+ h.Write(data)
+ return h.Sum(nil), nil
+}
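
SignerHMAC embeds VerifierHMAC, so one value both signs and verifies. A sketch with a placeholder key ID and secret:

package main

import (
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	s := jose.NewSignerHMAC("key-1", []byte("shared-secret"))

	data := []byte("header.payload")
	sig, err := s.Sign(data)
	if err != nil {
		panic(err)
	}

	// The signer embeds a VerifierHMAC, so the same value can check the signature.
	fmt.Println(s.Verify(sig, data))               // <nil>
	fmt.Println(s.Verify(sig, []byte("tampered"))) // invalid hmac signature
}
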
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go
new file mode 100644
index 0000000..004e45d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go
@@ -0,0 +1,67 @@
+package jose
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "fmt"
+)
+
+type VerifierRSA struct {
+ KeyID string
+ Hash crypto.Hash
+ PublicKey rsa.PublicKey
+}
+
+type SignerRSA struct {
+ PrivateKey rsa.PrivateKey
+ VerifierRSA
+}
+
+func NewVerifierRSA(jwk JWK) (*VerifierRSA, error) {
+ if jwk.Alg != "" && jwk.Alg != "RS256" {
+ return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
+ }
+
+ v := VerifierRSA{
+ KeyID: jwk.ID,
+ PublicKey: rsa.PublicKey{
+ N: jwk.Modulus,
+ E: jwk.Exponent,
+ },
+ Hash: crypto.SHA256,
+ }
+
+ return &v, nil
+}
+
+func NewSignerRSA(kid string, key rsa.PrivateKey) *SignerRSA {
+ return &SignerRSA{
+ PrivateKey: key,
+ VerifierRSA: VerifierRSA{
+ KeyID: kid,
+ PublicKey: key.PublicKey,
+ Hash: crypto.SHA256,
+ },
+ }
+}
+
+func (v *VerifierRSA) ID() string {
+ return v.KeyID
+}
+
+func (v *VerifierRSA) Alg() string {
+ return "RS256"
+}
+
+func (v *VerifierRSA) Verify(sig []byte, data []byte) error {
+ h := v.Hash.New()
+ h.Write(data)
+ return rsa.VerifyPKCS1v15(&v.PublicKey, v.Hash, h.Sum(nil), sig)
+}
+
+func (s *SignerRSA) Sign(data []byte) ([]byte, error) {
+ h := s.Hash.New()
+ h.Write(data)
+ return rsa.SignPKCS1v15(rand.Reader, &s.PrivateKey, s.Hash, h.Sum(nil))
+}
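
NewSignerRSA copies the public half of the key into the embedded VerifierRSA, so the same value can verify what it signs. A sketch with a freshly generated throwaway key:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	s := jose.NewSignerRSA("key-1", *key)
	data := []byte("header.payload")
	sig, err := s.Sign(data)
	if err != nil {
		panic(err)
	}

	// Verification uses the embedded public key; no JWK needs to be rebuilt.
	fmt.Println(s.Verify(sig, data)) // <nil>
}
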
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/key/key.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/key.go
new file mode 100644
index 0000000..d0142a9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/key.go
@@ -0,0 +1,153 @@
+package key
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "encoding/base64"
+ "encoding/json"
+ "math/big"
+ "time"
+
+ "github.com/coreos/go-oidc/jose"
+)
+
+func NewPublicKey(jwk jose.JWK) *PublicKey {
+ return &PublicKey{jwk: jwk}
+}
+
+type PublicKey struct {
+ jwk jose.JWK
+}
+
+func (k *PublicKey) MarshalJSON() ([]byte, error) {
+ return json.Marshal(&k.jwk)
+}
+
+func (k *PublicKey) UnmarshalJSON(data []byte) error {
+ var jwk jose.JWK
+ if err := json.Unmarshal(data, &jwk); err != nil {
+ return err
+ }
+ k.jwk = jwk
+ return nil
+}
+
+func (k *PublicKey) ID() string {
+ return k.jwk.ID
+}
+
+func (k *PublicKey) Verifier() (jose.Verifier, error) {
+ return jose.NewVerifierRSA(k.jwk)
+}
+
+type PrivateKey struct {
+ KeyID string
+ PrivateKey *rsa.PrivateKey
+}
+
+func (k *PrivateKey) ID() string {
+ return k.KeyID
+}
+
+func (k *PrivateKey) Signer() jose.Signer {
+ return jose.NewSignerRSA(k.ID(), *k.PrivateKey)
+}
+
+func (k *PrivateKey) JWK() jose.JWK {
+ return jose.JWK{
+ ID: k.KeyID,
+ Type: "RSA",
+ Alg: "RS256",
+ Use: "sig",
+ Exponent: k.PrivateKey.PublicKey.E,
+ Modulus: k.PrivateKey.PublicKey.N,
+ }
+}
+
+type KeySet interface {
+ ExpiresAt() time.Time
+}
+
+type PublicKeySet struct {
+ keys []PublicKey
+ index map[string]*PublicKey
+ expiresAt time.Time
+}
+
+func NewPublicKeySet(jwks []jose.JWK, exp time.Time) *PublicKeySet {
+ keys := make([]PublicKey, len(jwks))
+ index := make(map[string]*PublicKey)
+ for i, jwk := range jwks {
+ keys[i] = *NewPublicKey(jwk)
+ index[keys[i].ID()] = &keys[i]
+ }
+ return &PublicKeySet{
+ keys: keys,
+ index: index,
+ expiresAt: exp,
+ }
+}
+
+func (s *PublicKeySet) ExpiresAt() time.Time {
+ return s.expiresAt
+}
+
+func (s *PublicKeySet) Keys() []PublicKey {
+ return s.keys
+}
+
+func (s *PublicKeySet) Key(id string) *PublicKey {
+ return s.index[id]
+}
+
+type PrivateKeySet struct {
+ keys []*PrivateKey
+ ActiveKeyID string
+ expiresAt time.Time
+}
+
+func NewPrivateKeySet(keys []*PrivateKey, exp time.Time) *PrivateKeySet {
+ return &PrivateKeySet{
+ keys: keys,
+ ActiveKeyID: keys[0].ID(),
+ expiresAt: exp.UTC(),
+ }
+}
+
+func (s *PrivateKeySet) Keys() []*PrivateKey {
+ return s.keys
+}
+
+func (s *PrivateKeySet) ExpiresAt() time.Time {
+ return s.expiresAt
+}
+
+func (s *PrivateKeySet) Active() *PrivateKey {
+ for i, k := range s.keys {
+ if k.ID() == s.ActiveKeyID {
+ return s.keys[i]
+ }
+ }
+
+ return nil
+}
+
+type GeneratePrivateKeyFunc func() (*PrivateKey, error)
+
+func GeneratePrivateKey() (*PrivateKey, error) {
+ pk, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return nil, err
+ }
+
+ k := PrivateKey{
+ KeyID: base64BigInt(pk.PublicKey.N),
+ PrivateKey: pk,
+ }
+
+ return &k, nil
+}
+
+func base64BigInt(b *big.Int) string {
+ return base64.URLEncoding.EncodeToString(b.Bytes())
+}
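
GeneratePrivateKey produces a 2048-bit RSA key whose ID is the base64url-encoded modulus, and NewPublicKeySet indexes the published JWKs by that ID. A sketch with an arbitrary one-hour expiry:

package main

import (
	"fmt"
	"time"

	"github.com/coreos/go-oidc/jose"
	"github.com/coreos/go-oidc/key"
)

func main() {
	// Generate a signing key; the key ID is derived from the modulus.
	priv, err := key.GeneratePrivateKey()
	if err != nil {
		panic(err)
	}

	// Publish the public half as a key set that expires in an hour.
	pubs := key.NewPublicKeySet([]jose.JWK{priv.JWK()}, time.Now().Add(time.Hour))
	fmt.Println(pubs.Key(priv.ID()) != nil) // true
	fmt.Println(pubs.ExpiresAt())
}
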
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/key/manager.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/manager.go
new file mode 100644
index 0000000..476ab6a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/manager.go
@@ -0,0 +1,99 @@
+package key
+
+import (
+ "errors"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+
+ "github.com/coreos/go-oidc/jose"
+ "github.com/coreos/pkg/health"
+)
+
+type PrivateKeyManager interface {
+ ExpiresAt() time.Time
+ Signer() (jose.Signer, error)
+ JWKs() ([]jose.JWK, error)
+ PublicKeys() ([]PublicKey, error)
+
+ WritableKeySetRepo
+ health.Checkable
+}
+
+func NewPrivateKeyManager() PrivateKeyManager {
+ return &privateKeyManager{
+ clock: clockwork.NewRealClock(),
+ }
+}
+
+type privateKeyManager struct {
+ keySet *PrivateKeySet
+ clock clockwork.Clock
+}
+
+func (m *privateKeyManager) ExpiresAt() time.Time {
+ if m.keySet == nil {
+ return m.clock.Now().UTC()
+ }
+
+ return m.keySet.ExpiresAt()
+}
+
+func (m *privateKeyManager) Signer() (jose.Signer, error) {
+ if err := m.Healthy(); err != nil {
+ return nil, err
+ }
+
+ return m.keySet.Active().Signer(), nil
+}
+
+func (m *privateKeyManager) JWKs() ([]jose.JWK, error) {
+ if err := m.Healthy(); err != nil {
+ return nil, err
+ }
+
+ keys := m.keySet.Keys()
+ jwks := make([]jose.JWK, len(keys))
+ for i, k := range keys {
+ jwks[i] = k.JWK()
+ }
+ return jwks, nil
+}
+
+func (m *privateKeyManager) PublicKeys() ([]PublicKey, error) {
+ jwks, err := m.JWKs()
+ if err != nil {
+ return nil, err
+ }
+ keys := make([]PublicKey, len(jwks))
+ for i, jwk := range jwks {
+ keys[i] = *NewPublicKey(jwk)
+ }
+ return keys, nil
+}
+
+func (m *privateKeyManager) Healthy() error {
+ if m.keySet == nil {
+ return errors.New("private key manager uninitialized")
+ }
+
+ if len(m.keySet.Keys()) == 0 {
+ return errors.New("private key manager zero keys")
+ }
+
+ if m.keySet.ExpiresAt().Before(m.clock.Now().UTC()) {
+ return errors.New("private key manager keys expired")
+ }
+
+ return nil
+}
+
+func (m *privateKeyManager) Set(keySet KeySet) error {
+ privKeySet, ok := keySet.(*PrivateKeySet)
+ if !ok {
+ return errors.New("unable to cast to PrivateKeySet")
+ }
+
+ m.keySet = privKeySet
+ return nil
+}
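
The manager reports unhealthy until a non-expired PrivateKeySet has been stored with Set; after that Signer and JWKs work. A sketch with an arbitrary one-hour expiry:

package main

import (
	"fmt"
	"time"

	"github.com/coreos/go-oidc/key"
)

func main() {
	priv, err := key.GeneratePrivateKey()
	if err != nil {
		panic(err)
	}

	m := key.NewPrivateKeyManager()
	// The manager is only healthy once a non-expired PrivateKeySet has been Set.
	if err := m.Set(key.NewPrivateKeySet([]*key.PrivateKey{priv}, time.Now().Add(time.Hour))); err != nil {
		panic(err)
	}

	signer, err := m.Signer()
	if err != nil {
		panic(err)
	}
	fmt.Println(signer.ID() == priv.ID()) // true
}
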
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/key/repo.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/repo.go
new file mode 100644
index 0000000..1acdeb3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/repo.go
@@ -0,0 +1,55 @@
+package key
+
+import (
+ "errors"
+ "sync"
+)
+
+var ErrorNoKeys = errors.New("no keys found")
+
+type WritableKeySetRepo interface {
+ Set(KeySet) error
+}
+
+type ReadableKeySetRepo interface {
+ Get() (KeySet, error)
+}
+
+type PrivateKeySetRepo interface {
+ WritableKeySetRepo
+ ReadableKeySetRepo
+}
+
+func NewPrivateKeySetRepo() PrivateKeySetRepo {
+ return &memPrivateKeySetRepo{}
+}
+
+type memPrivateKeySetRepo struct {
+ mu sync.RWMutex
+ pks PrivateKeySet
+}
+
+func (r *memPrivateKeySetRepo) Set(ks KeySet) error {
+ pks, ok := ks.(*PrivateKeySet)
+ if !ok {
+ return errors.New("unable to cast to PrivateKeySet")
+ } else if pks == nil {
+ return errors.New("nil KeySet")
+ }
+
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ r.pks = *pks
+ return nil
+}
+
+func (r *memPrivateKeySetRepo) Get() (KeySet, error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.pks.keys == nil {
+ return nil, ErrorNoKeys
+ }
+ return KeySet(&r.pks), nil
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/key/rotate.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/rotate.go
new file mode 100644
index 0000000..9c5508b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/rotate.go
@@ -0,0 +1,165 @@
+package key
+
+import (
+ "errors"
+ "time"
+
+ "github.com/coreos/pkg/capnslog"
+ ptime "github.com/coreos/pkg/timeutil"
+ "github.com/jonboulle/clockwork"
+)
+
+var (
+ log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "key")
+
+ ErrorPrivateKeysExpired = errors.New("private keys have expired")
+)
+
+func NewPrivateKeyRotator(repo PrivateKeySetRepo, ttl time.Duration) *PrivateKeyRotator {
+ return &PrivateKeyRotator{
+ repo: repo,
+ ttl: ttl,
+
+ keep: 2,
+ generateKey: GeneratePrivateKey,
+ clock: clockwork.NewRealClock(),
+ }
+}
+
+type PrivateKeyRotator struct {
+ repo PrivateKeySetRepo
+ generateKey GeneratePrivateKeyFunc
+ clock clockwork.Clock
+ keep int
+ ttl time.Duration
+}
+
+func (r *PrivateKeyRotator) expiresAt() time.Time {
+ return r.clock.Now().UTC().Add(r.ttl)
+}
+
+func (r *PrivateKeyRotator) Healthy() error {
+ pks, err := r.privateKeySet()
+ if err != nil {
+ return err
+ }
+
+ if r.clock.Now().After(pks.ExpiresAt()) {
+ return ErrorPrivateKeysExpired
+ }
+
+ return nil
+}
+
+func (r *PrivateKeyRotator) privateKeySet() (*PrivateKeySet, error) {
+ ks, err := r.repo.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ pks, ok := ks.(*PrivateKeySet)
+ if !ok {
+ return nil, errors.New("unable to cast to PrivateKeySet")
+ }
+ return pks, nil
+}
+
+func (r *PrivateKeyRotator) nextRotation() (time.Duration, error) {
+ pks, err := r.privateKeySet()
+ if err == ErrorNoKeys {
+ log.Infof("No keys in private key set; must rotate immediately")
+ return 0, nil
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ now := r.clock.Now()
+
+ // Ideally, we want to rotate after half the TTL has elapsed.
+ idealRotationTime := pks.ExpiresAt().Add(-r.ttl / 2)
+
+ // If we are past the ideal rotation time, rotate immediately.
+ return max(0, idealRotationTime.Sub(now)), nil
+}
+
+func max(a, b time.Duration) time.Duration {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func (r *PrivateKeyRotator) Run() chan struct{} {
+ attempt := func() {
+ k, err := r.generateKey()
+ if err != nil {
+ log.Errorf("Failed generating signing key: %v", err)
+ return
+ }
+
+ exp := r.expiresAt()
+ if err := rotatePrivateKeys(r.repo, k, r.keep, exp); err != nil {
+ log.Errorf("Failed key rotation: %v", err)
+ return
+ }
+
+ log.Infof("Rotated signing keys: id=%s expiresAt=%s", k.ID(), exp)
+ }
+
+ stop := make(chan struct{})
+ go func() {
+ for {
+ var nextRotation time.Duration
+ var sleep time.Duration
+ var err error
+ for {
+ if nextRotation, err = r.nextRotation(); err == nil {
+ break
+ }
+ sleep = ptime.ExpBackoff(sleep, time.Minute)
+ log.Errorf("error getting nextRotation, retrying in %v: %v", sleep, err)
+ time.Sleep(sleep)
+ }
+
+ log.Infof("will rotate keys in %v", nextRotation)
+ select {
+ case <-r.clock.After(nextRotation):
+ attempt()
+ case <-stop:
+ return
+ }
+ }
+ }()
+
+ return stop
+}
+
+func rotatePrivateKeys(repo PrivateKeySetRepo, k *PrivateKey, keep int, exp time.Time) error {
+ ks, err := repo.Get()
+ if err != nil && err != ErrorNoKeys {
+ return err
+ }
+
+ var keys []*PrivateKey
+ if ks != nil {
+ pks, ok := ks.(*PrivateKeySet)
+ if !ok {
+ return errors.New("unable to cast to PrivateKeySet")
+ }
+ keys = pks.Keys()
+ }
+
+ keys = append([]*PrivateKey{k}, keys...)
+ if l := len(keys); l > keep {
+ keys = keys[0:keep]
+ }
+
+ nks := PrivateKeySet{
+ keys: keys,
+ ActiveKeyID: k.ID(),
+ expiresAt: exp,
+ }
+
+ return repo.Set(KeySet(&nks))
+}
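
A rotator generates a fresh key immediately when the repo is empty, then again once half the TTL has elapsed, keeping the two most recent keys. A sketch of wiring it up; the TTL is arbitrary and the sleep only gives the first rotation time to finish in this toy example:

package main

import (
	"fmt"
	"time"

	"github.com/coreos/go-oidc/key"
)

func main() {
	repo := key.NewPrivateKeySetRepo()

	// Rotate the signing key every 6 hours; Run returns a channel that stops the loop when closed.
	rotator := key.NewPrivateKeyRotator(repo, 6*time.Hour)
	stop := rotator.Run()
	defer close(stop)

	// Let the first rotation complete, then read back the freshly stored key set.
	time.Sleep(time.Second)
	ks, err := repo.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println(ks.ExpiresAt())
}
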
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/key/sync.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/sync.go
new file mode 100644
index 0000000..e8d5d03
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/sync.go
@@ -0,0 +1,91 @@
+package key
+
+import (
+ "errors"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+
+ "github.com/coreos/pkg/timeutil"
+)
+
+func NewKeySetSyncer(r ReadableKeySetRepo, w WritableKeySetRepo) *KeySetSyncer {
+ return &KeySetSyncer{
+ readable: r,
+ writable: w,
+ clock: clockwork.NewRealClock(),
+ }
+}
+
+type KeySetSyncer struct {
+ readable ReadableKeySetRepo
+ writable WritableKeySetRepo
+ clock clockwork.Clock
+}
+
+func (s *KeySetSyncer) Run() chan struct{} {
+ stop := make(chan struct{})
+ go func() {
+ var failing bool
+ var next time.Duration
+ for {
+ exp, err := syncKeySet(s.readable, s.writable, s.clock)
+ if err != nil || exp == 0 {
+ if !failing {
+ failing = true
+ next = time.Second
+ } else {
+ next = timeutil.ExpBackoff(next, time.Minute)
+ }
+ if exp == 0 {
+ log.Errorf("Synced to already expired key set, retrying in %v: %v", next, err)
+
+ } else {
+ log.Errorf("Failed syncing key set, retrying in %v: %v", next, err)
+ }
+ } else {
+ failing = false
+ next = exp / 2
+ log.Infof("Synced key set, checking again in %v", next)
+ }
+
+ select {
+ case <-s.clock.After(next):
+ continue
+ case <-stop:
+ return
+ }
+ }
+ }()
+
+ return stop
+}
+
+func Sync(r ReadableKeySetRepo, w WritableKeySetRepo) (time.Duration, error) {
+ return syncKeySet(r, w, clockwork.NewRealClock())
+}
+
+// syncKeySet copies the keyset from r to the KeySet at w and returns the duration after which the KeySet will expire.
+// If keyset has already expired, returns a zero duration.
+func syncKeySet(r ReadableKeySetRepo, w WritableKeySetRepo, clock clockwork.Clock) (exp time.Duration, err error) {
+ var ks KeySet
+ ks, err = r.Get()
+ if err != nil {
+ return
+ }
+
+ if ks == nil {
+ err = errors.New("no source KeySet")
+ return
+ }
+
+ if err = w.Set(ks); err != nil {
+ return
+ }
+
+ now := clock.Now()
+ if ks.ExpiresAt().After(now) {
+ exp = ks.ExpiresAt().Sub(now)
+ }
+ return
+}
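
Sync does a one-shot copy from a readable repo to a writable one and reports roughly how long the copied set remains valid. A sketch using two in-memory repos and an arbitrary one-hour expiry:

package main

import (
	"fmt"
	"time"

	"github.com/coreos/go-oidc/key"
)

func main() {
	source := key.NewPrivateKeySetRepo()
	cache := key.NewPrivateKeySetRepo()

	priv, err := key.GeneratePrivateKey()
	if err != nil {
		panic(err)
	}
	if err := source.Set(key.NewPrivateKeySet([]*key.PrivateKey{priv}, time.Now().Add(time.Hour))); err != nil {
		panic(err)
	}

	// One-shot copy from source to cache; the returned duration is roughly the remaining TTL.
	exp, err := key.Sync(source, cache)
	fmt.Println(exp > 0, err) // true <nil>
}
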
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/error.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/error.go
new file mode 100644
index 0000000..50d8909
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/error.go
@@ -0,0 +1,29 @@
+package oauth2
+
+const (
+ ErrorAccessDenied = "access_denied"
+ ErrorInvalidClient = "invalid_client"
+ ErrorInvalidGrant = "invalid_grant"
+ ErrorInvalidRequest = "invalid_request"
+ ErrorServerError = "server_error"
+ ErrorUnauthorizedClient = "unauthorized_client"
+ ErrorUnsupportedGrantType = "unsupported_grant_type"
+ ErrorUnsupportedResponseType = "unsupported_response_type"
+)
+
+type Error struct {
+ Type string `json:"error"`
+ Description string `json:"error_description,omitempty"`
+ State string `json:"state,omitempty"`
+}
+
+func (e *Error) Error() string {
+ if e.Description != "" {
+ return e.Type + ": " + e.Description
+ }
+ return e.Type
+}
+
+func NewError(typ string) *Error {
+ return &Error{Type: typ}
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go
new file mode 100644
index 0000000..1c68293
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go
@@ -0,0 +1,416 @@
+package oauth2
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+
+ phttp "github.com/coreos/go-oidc/http"
+)
+
+// ResponseTypesEqual compares two response_type values. If either
+// contains a space, it is treated as an unordered list. For example,
+// comparing "code id_token" and "id_token code" would evaluate to true.
+func ResponseTypesEqual(r1, r2 string) bool {
+ if !strings.Contains(r1, " ") || !strings.Contains(r2, " ") {
+ // fast route, no split needed
+ return r1 == r2
+ }
+
+ // split, sort, and compare
+ r1Fields := strings.Fields(r1)
+ r2Fields := strings.Fields(r2)
+ if len(r1Fields) != len(r2Fields) {
+ return false
+ }
+ sort.Strings(r1Fields)
+ sort.Strings(r2Fields)
+ for i, r1Field := range r1Fields {
+ if r1Field != r2Fields[i] {
+ return false
+ }
+ }
+ return true
+}
+
+const (
+ // OAuth2.0 response types registered by OIDC.
+ //
+ // See: https://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#RegistryContents
+ ResponseTypeCode = "code"
+ ResponseTypeCodeIDToken = "code id_token"
+ ResponseTypeCodeIDTokenToken = "code id_token token"
+ ResponseTypeIDToken = "id_token"
+ ResponseTypeIDTokenToken = "id_token token"
+ ResponseTypeToken = "token"
+ ResponseTypeNone = "none"
+)
+
+const (
+ GrantTypeAuthCode = "authorization_code"
+ GrantTypeClientCreds = "client_credentials"
+ GrantTypeUserCreds = "password"
+ GrantTypeImplicit = "implicit"
+ GrantTypeRefreshToken = "refresh_token"
+
+ AuthMethodClientSecretPost = "client_secret_post"
+ AuthMethodClientSecretBasic = "client_secret_basic"
+ AuthMethodClientSecretJWT = "client_secret_jwt"
+ AuthMethodPrivateKeyJWT = "private_key_jwt"
+)
+
+type Config struct {
+ Credentials ClientCredentials
+ Scope []string
+ RedirectURL string
+ AuthURL string
+ TokenURL string
+
+ // Must be one of the AuthMethodXXX methods above. Right now, only
+ // AuthMethodClientSecretPost and AuthMethodClientSecretBasic are supported.
+ AuthMethod string
+}
+
+type Client struct {
+ hc phttp.Client
+ creds ClientCredentials
+ scope []string
+ authURL *url.URL
+ redirectURL *url.URL
+ tokenURL *url.URL
+ authMethod string
+}
+
+type ClientCredentials struct {
+ ID string
+ Secret string
+}
+
+func NewClient(hc phttp.Client, cfg Config) (c *Client, err error) {
+ if len(cfg.Credentials.ID) == 0 {
+ err = errors.New("missing client id")
+ return
+ }
+
+ if len(cfg.Credentials.Secret) == 0 {
+ err = errors.New("missing client secret")
+ return
+ }
+
+ if cfg.AuthMethod == "" {
+ cfg.AuthMethod = AuthMethodClientSecretBasic
+ } else if cfg.AuthMethod != AuthMethodClientSecretPost && cfg.AuthMethod != AuthMethodClientSecretBasic {
+ err = fmt.Errorf("auth method %q is not supported", cfg.AuthMethod)
+ return
+ }
+
+ au, err := phttp.ParseNonEmptyURL(cfg.AuthURL)
+ if err != nil {
+ return
+ }
+
+ tu, err := phttp.ParseNonEmptyURL(cfg.TokenURL)
+ if err != nil {
+ return
+ }
+
+ // Allow empty redirect URL in the case where the client
+ // only needs to verify a given token.
+ ru, err := url.Parse(cfg.RedirectURL)
+ if err != nil {
+ return
+ }
+
+ c = &Client{
+ creds: cfg.Credentials,
+ scope: cfg.Scope,
+ redirectURL: ru,
+ authURL: au,
+ tokenURL: tu,
+ hc: hc,
+ authMethod: cfg.AuthMethod,
+ }
+
+ return
+}
+
+// HttpClient returns the embedded HTTP client.
+func (c *Client) HttpClient() phttp.Client {
+ return c.hc
+}
+
+// AuthCodeURL generates the URL for the initial redirect to the OAuth provider.
+func (c *Client) AuthCodeURL(state, accessType, prompt string) string {
+ v := c.commonURLValues()
+ v.Set("state", state)
+ if strings.ToLower(accessType) == "offline" {
+ v.Set("access_type", "offline")
+ }
+
+ if prompt != "" {
+ v.Set("prompt", prompt)
+ }
+ v.Set("response_type", "code")
+
+ q := v.Encode()
+ u := *c.authURL
+ if u.RawQuery == "" {
+ u.RawQuery = q
+ } else {
+ u.RawQuery += "&" + q
+ }
+ return u.String()
+}
+
+func (c *Client) commonURLValues() url.Values {
+ return url.Values{
+ "redirect_uri": {c.redirectURL.String()},
+ "scope": {strings.Join(c.scope, " ")},
+ "client_id": {c.creds.ID},
+ }
+}
+
+func (c *Client) newAuthenticatedRequest(urlToken string, values url.Values) (*http.Request, error) {
+ var req *http.Request
+ var err error
+ switch c.authMethod {
+ case AuthMethodClientSecretPost:
+ values.Set("client_secret", c.creds.Secret)
+ req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ case AuthMethodClientSecretBasic:
+ req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ encodedID := url.QueryEscape(c.creds.ID)
+ encodedSecret := url.QueryEscape(c.creds.Secret)
+ req.SetBasicAuth(encodedID, encodedSecret)
+ default:
+ panic("misconfigured client: auth method not supported")
+ }
+
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ return req, nil
+
+}
+
+// ClientCredsToken posts the client id and secret to obtain a token scoped to the OAuth2 client via the "client_credentials" grant type.
+// May not be supported by all OAuth2 servers.
+func (c *Client) ClientCredsToken(scope []string) (result TokenResponse, err error) {
+ v := url.Values{
+ "scope": {strings.Join(scope, " ")},
+ "grant_type": {GrantTypeClientCreds},
+ }
+
+ req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
+ if err != nil {
+ return
+ }
+
+ resp, err := c.hc.Do(req)
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+
+ return parseTokenResponse(resp)
+}
+
+// UserCredsToken posts the username and password to obtain a token scoped to the OAuth2 client via the "password" grant type.
+// May not be supported by all OAuth2 servers.
+func (c *Client) UserCredsToken(username, password string) (result TokenResponse, err error) {
+ v := url.Values{
+ "scope": {strings.Join(c.scope, " ")},
+ "grant_type": {GrantTypeUserCreds},
+ "username": {username},
+ "password": {password},
+ }
+
+ req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
+ if err != nil {
+ return
+ }
+
+ resp, err := c.hc.Do(req)
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+
+ return parseTokenResponse(resp)
+}
+
+// RequestToken requests a token from the Token Endpoint with the specified grantType.
+// If 'grantType' == GrantTypeAuthCode, then 'value' should be the authorization code.
+// If 'grantType' == GrantTypeRefreshToken, then 'value' should be the refresh token.
+func (c *Client) RequestToken(grantType, value string) (result TokenResponse, err error) {
+ v := c.commonURLValues()
+
+ v.Set("grant_type", grantType)
+ v.Set("client_secret", c.creds.Secret)
+ switch grantType {
+ case GrantTypeAuthCode:
+ v.Set("code", value)
+ case GrantTypeRefreshToken:
+ v.Set("refresh_token", value)
+ default:
+ err = fmt.Errorf("unsupported grant_type: %v", grantType)
+ return
+ }
+
+ req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
+ if err != nil {
+ return
+ }
+
+ resp, err := c.hc.Do(req)
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+
+ return parseTokenResponse(resp)
+}
+
+func parseTokenResponse(resp *http.Response) (result TokenResponse, err error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return
+ }
+ badStatusCode := resp.StatusCode < 200 || resp.StatusCode > 299
+
+ contentType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
+ if err != nil {
+ return
+ }
+
+ result = TokenResponse{
+ RawBody: body,
+ }
+
+ newError := func(typ, desc, state string) error {
+ if typ == "" {
+ return fmt.Errorf("unrecognized error %s", body)
+ }
+ return &Error{typ, desc, state}
+ }
+
+ if contentType == "application/x-www-form-urlencoded" || contentType == "text/plain" {
+ var vals url.Values
+ vals, err = url.ParseQuery(string(body))
+ if err != nil {
+ return
+ }
+ if error := vals.Get("error"); error != "" || badStatusCode {
+ err = newError(error, vals.Get("error_description"), vals.Get("state"))
+ return
+ }
+ e := vals.Get("expires_in")
+ if e == "" {
+ e = vals.Get("expires")
+ }
+ if e != "" {
+ result.Expires, err = strconv.Atoi(e)
+ if err != nil {
+ return
+ }
+ }
+ result.AccessToken = vals.Get("access_token")
+ result.TokenType = vals.Get("token_type")
+ result.IDToken = vals.Get("id_token")
+ result.RefreshToken = vals.Get("refresh_token")
+ result.Scope = vals.Get("scope")
+ } else {
+ var r struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ IDToken string `json:"id_token"`
+ RefreshToken string `json:"refresh_token"`
+ Scope string `json:"scope"`
+ State string `json:"state"`
+ ExpiresIn int `json:"expires_in"`
+ Expires int `json:"expires"`
+ Error string `json:"error"`
+ Desc string `json:"error_description"`
+ }
+ if err = json.Unmarshal(body, &r); err != nil {
+ return
+ }
+ if r.Error != "" || badStatusCode {
+ err = newError(r.Error, r.Desc, r.State)
+ return
+ }
+ result.AccessToken = r.AccessToken
+ result.TokenType = r.TokenType
+ result.IDToken = r.IDToken
+ result.RefreshToken = r.RefreshToken
+ result.Scope = r.Scope
+ if r.ExpiresIn == 0 {
+ result.Expires = r.Expires
+ } else {
+ result.Expires = r.ExpiresIn
+ }
+ }
+ return
+}
+
+type TokenResponse struct {
+ AccessToken string
+ TokenType string
+ Expires int
+ IDToken string
+ RefreshToken string // OPTIONAL.
+ Scope string // OPTIONAL, if identical to the scope requested by the client, otherwise, REQUIRED.
+ RawBody []byte // In case callers need some other non-standard info from the token response
+}
+
+type AuthCodeRequest struct {
+ ResponseType string
+ ClientID string
+ RedirectURL *url.URL
+ Scope []string
+ State string
+}
+
+func ParseAuthCodeRequest(q url.Values) (AuthCodeRequest, error) {
+ acr := AuthCodeRequest{
+ ResponseType: q.Get("response_type"),
+ ClientID: q.Get("client_id"),
+ State: q.Get("state"),
+ Scope: make([]string, 0),
+ }
+
+ qs := strings.TrimSpace(q.Get("scope"))
+ if qs != "" {
+ acr.Scope = strings.Split(qs, " ")
+ }
+
+ err := func() error {
+ if acr.ClientID == "" {
+ return NewError(ErrorInvalidRequest)
+ }
+
+ redirectURL := q.Get("redirect_uri")
+ if redirectURL != "" {
+ ru, err := url.Parse(redirectURL)
+ if err != nil {
+ return NewError(ErrorInvalidRequest)
+ }
+ acr.RedirectURL = ru
+ }
+
+ return nil
+ }()
+
+ return acr, err
+}
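
A sketch of constructing a client and producing the initial redirect URL; the endpoints, credentials, and state value are placeholders, and AuthMethod falls back to client_secret_basic when left empty:

package main

import (
	"fmt"
	"net/http"

	"github.com/coreos/go-oidc/oauth2"
)

func main() {
	cfg := oauth2.Config{
		Credentials: oauth2.ClientCredentials{ID: "example-client", Secret: "example-secret"},
		Scope:       []string{"openid", "email"},
		RedirectURL: "https://app.example.com/callback",
		AuthURL:     "https://provider.example.com/authorize",
		TokenURL:    "https://provider.example.com/token",
	}

	client, err := oauth2.NewClient(http.DefaultClient, cfg)
	if err != nil {
		panic(err)
	}

	// The user is redirected here; the state value is echoed back on the callback.
	fmt.Println(client.AuthCodeURL("random-state", "", ""))
}
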
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/client.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/client.go
new file mode 100644
index 0000000..7a3cb40
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/client.go
@@ -0,0 +1,846 @@
+package oidc
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/mail"
+ "net/url"
+ "sync"
+ "time"
+
+ phttp "github.com/coreos/go-oidc/http"
+ "github.com/coreos/go-oidc/jose"
+ "github.com/coreos/go-oidc/key"
+ "github.com/coreos/go-oidc/oauth2"
+)
+
+const (
+ // amount of time that must pass after the last key sync
+ // completes before another attempt may begin
+ keySyncWindow = 5 * time.Second
+)
+
+var (
+ DefaultScope = []string{"openid", "email", "profile"}
+
+ supportedAuthMethods = map[string]struct{}{
+ oauth2.AuthMethodClientSecretBasic: struct{}{},
+ oauth2.AuthMethodClientSecretPost: struct{}{},
+ }
+)
+
+type ClientCredentials oauth2.ClientCredentials
+
+type ClientIdentity struct {
+ Credentials ClientCredentials
+ Metadata ClientMetadata
+}
+
+type JWAOptions struct {
+ // SigningAlg specifies a JWA alg for signing JWTs.
+ //
+ // Specifying this field implies different actions depending on the context. It may
+ // require objects be serialized and signed as a JWT instead of plain JSON, or
+ // require an existing JWT object use the specified alg.
+ //
+ // See: http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
+ SigningAlg string
+ // EncryptionAlg, if provided, specifies that the returned or sent object be stored
+ // (or nested) within a JWT object and encrypted with the provided JWA alg.
+ EncryptionAlg string
+ // EncryptionEnc specifies the JWA enc algorithm to use with EncryptionAlg. If
+ // EncryptionAlg is provided and EncryptionEnc is omitted, this field defaults
+ // to A128CBC-HS256.
+ //
+ // If EncryptionEnc is provided EncryptionAlg must also be specified.
+ EncryptionEnc string
+}
+
+func (opt JWAOptions) valid() error {
+ if opt.EncryptionEnc != "" && opt.EncryptionAlg == "" {
+ return errors.New("encryption encoding provided with no encryption algorithm")
+ }
+ return nil
+}
+
+func (opt JWAOptions) defaults() JWAOptions {
+ if opt.EncryptionAlg != "" && opt.EncryptionEnc == "" {
+ opt.EncryptionEnc = jose.EncA128CBCHS256
+ }
+ return opt
+}
+
+var (
+ // Ensure ClientMetadata satisfies these interfaces.
+ _ json.Marshaler = &ClientMetadata{}
+ _ json.Unmarshaler = &ClientMetadata{}
+)
+
+// ClientMetadata holds metadata that the authorization server associates
+// with a client identifier. The fields range from human-facing display
+// strings such as client name, to items that impact the security of the
+// protocol, such as the list of valid redirect URIs.
+//
+// See http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
+//
+// TODO: support language specific claim representations
+// http://openid.net/specs/openid-connect-registration-1_0.html#LanguagesAndScripts
+type ClientMetadata struct {
+ RedirectURIs []url.URL // Required
+
+ // A list of OAuth 2.0 "response_type" values that the client wishes to restrict
+ // itself to. Either "code", "token", or another registered extension.
+ //
+ // If omitted, only "code" will be used.
+ ResponseTypes []string
+ // A list of OAuth 2.0 grant types the client wishes to restrict itself to.
+ // The grant type values used by OIDC are "authorization_code", "implicit",
+ // and "refresh_token".
+ //
+ // If omitted, only "authorization_code" will be used.
+ GrantTypes []string
+ // "native" or "web". If omitted, "web".
+ ApplicationType string
+
+ // List of email addresses.
+ Contacts []mail.Address
+ // Name of client to be presented to the end-user.
+ ClientName string
+ // URL that references a logo for the Client application.
+ LogoURI *url.URL
+ // URL of the home page of the Client.
+ ClientURI *url.URL
+ // Profile data policies and terms of use to be provided to the end user.
+ PolicyURI *url.URL
+ TermsOfServiceURI *url.URL
+
+ // URL to or the value of the client's JSON Web Key Set document.
+ JWKSURI *url.URL
+ JWKS *jose.JWKSet
+
+ // URL referencing a file with a single JSON array of redirect URIs.
+ SectorIdentifierURI *url.URL
+
+ SubjectType string
+
+ // Options to restrict the JWS alg and enc values used for server responses and requests.
+ IDTokenResponseOptions JWAOptions
+ UserInfoResponseOptions JWAOptions
+ RequestObjectOptions JWAOptions
+
+ // Client requested authorization method and signing options for the token endpoint.
+ //
+ // Defaults to "client_secret_basic"
+ TokenEndpointAuthMethod string
+ TokenEndpointAuthSigningAlg string
+
+ // DefaultMaxAge specifies the maximum amount of time in seconds before an authorized
+ // user must reauthorize.
+ //
+ // If 0, no limitation is placed on the maximum.
+ DefaultMaxAge int64
+ // RequireAuthTime specifies if the auth_time claim in the ID token is required.
+ RequireAuthTime bool
+
+ // Default Authentication Context Class Reference values for authentication requests.
+ DefaultACRValues []string
+
+ // URI that a third party can use to initiate a login by the relying party.
+ //
+ // See: http://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin
+ InitiateLoginURI *url.URL
+ // Pre-registered request_uri values that may be cached by the server.
+ RequestURIs []url.URL
+}
+
+// Defaults returns a shallow copy of ClientMetadata with default
+// values replacing omitted fields.
+func (m ClientMetadata) Defaults() ClientMetadata {
+ if len(m.ResponseTypes) == 0 {
+ m.ResponseTypes = []string{oauth2.ResponseTypeCode}
+ }
+ if len(m.GrantTypes) == 0 {
+ m.GrantTypes = []string{oauth2.GrantTypeAuthCode}
+ }
+ if m.ApplicationType == "" {
+ m.ApplicationType = "web"
+ }
+ if m.TokenEndpointAuthMethod == "" {
+ m.TokenEndpointAuthMethod = oauth2.AuthMethodClientSecretBasic
+ }
+ m.IDTokenResponseOptions = m.IDTokenResponseOptions.defaults()
+ m.UserInfoResponseOptions = m.UserInfoResponseOptions.defaults()
+ m.RequestObjectOptions = m.RequestObjectOptions.defaults()
+ return m
+}
+
+func (m *ClientMetadata) MarshalJSON() ([]byte, error) {
+ e := m.toEncodableStruct()
+ return json.Marshal(&e)
+}
+
+func (m *ClientMetadata) UnmarshalJSON(data []byte) error {
+ var e encodableClientMetadata
+ if err := json.Unmarshal(data, &e); err != nil {
+ return err
+ }
+ meta, err := e.toStruct()
+ if err != nil {
+ return err
+ }
+ if err := meta.Valid(); err != nil {
+ return err
+ }
+ *m = meta
+ return nil
+}
+
+type encodableClientMetadata struct {
+ RedirectURIs []string `json:"redirect_uris"` // Required
+ ResponseTypes []string `json:"response_types,omitempty"`
+ GrantTypes []string `json:"grant_types,omitempty"`
+ ApplicationType string `json:"application_type,omitempty"`
+ Contacts []string `json:"contacts,omitempty"`
+ ClientName string `json:"client_name,omitempty"`
+ LogoURI string `json:"logo_uri,omitempty"`
+ ClientURI string `json:"client_uri,omitempty"`
+ PolicyURI string `json:"policy_uri,omitempty"`
+ TermsOfServiceURI string `json:"tos_uri,omitempty"`
+ JWKSURI string `json:"jwks_uri,omitempty"`
+ JWKS *jose.JWKSet `json:"jwks,omitempty"`
+ SectorIdentifierURI string `json:"sector_identifier_uri,omitempty"`
+ SubjectType string `json:"subject_type,omitempty"`
+ IDTokenSignedResponseAlg string `json:"id_token_signed_response_alg,omitempty"`
+ IDTokenEncryptedResponseAlg string `json:"id_token_encrypted_response_alg,omitempty"`
+ IDTokenEncryptedResponseEnc string `json:"id_token_encrypted_response_enc,omitempty"`
+ UserInfoSignedResponseAlg string `json:"userinfo_signed_response_alg,omitempty"`
+ UserInfoEncryptedResponseAlg string `json:"userinfo_encrypted_response_alg,omitempty"`
+ UserInfoEncryptedResponseEnc string `json:"userinfo_encrypted_response_enc,omitempty"`
+ RequestObjectSigningAlg string `json:"request_object_signing_alg,omitempty"`
+ RequestObjectEncryptionAlg string `json:"request_object_encryption_alg,omitempty"`
+ RequestObjectEncryptionEnc string `json:"request_object_encryption_enc,omitempty"`
+ TokenEndpointAuthMethod string `json:"token_endpoint_auth_method,omitempty"`
+ TokenEndpointAuthSigningAlg string `json:"token_endpoint_auth_signing_alg,omitempty"`
+ DefaultMaxAge int64 `json:"default_max_age,omitempty"`
+ RequireAuthTime bool `json:"require_auth_time,omitempty"`
+ DefaultACRValues []string `json:"default_acr_values,omitempty"`
+ InitiateLoginURI string `json:"initiate_login_uri,omitempty"`
+ RequestURIs []string `json:"request_uris,omitempty"`
+}
+
+func (c *encodableClientMetadata) toStruct() (ClientMetadata, error) {
+ p := stickyErrParser{}
+ m := ClientMetadata{
+ RedirectURIs: p.parseURIs(c.RedirectURIs, "redirect_uris"),
+ ResponseTypes: c.ResponseTypes,
+ GrantTypes: c.GrantTypes,
+ ApplicationType: c.ApplicationType,
+ Contacts: p.parseEmails(c.Contacts, "contacts"),
+ ClientName: c.ClientName,
+ LogoURI: p.parseURI(c.LogoURI, "logo_uri"),
+ ClientURI: p.parseURI(c.ClientURI, "client_uri"),
+ PolicyURI: p.parseURI(c.PolicyURI, "policy_uri"),
+ TermsOfServiceURI: p.parseURI(c.TermsOfServiceURI, "tos_uri"),
+ JWKSURI: p.parseURI(c.JWKSURI, "jwks_uri"),
+ JWKS: c.JWKS,
+ SectorIdentifierURI: p.parseURI(c.SectorIdentifierURI, "sector_identifier_uri"),
+ SubjectType: c.SubjectType,
+ TokenEndpointAuthMethod: c.TokenEndpointAuthMethod,
+ TokenEndpointAuthSigningAlg: c.TokenEndpointAuthSigningAlg,
+ DefaultMaxAge: c.DefaultMaxAge,
+ RequireAuthTime: c.RequireAuthTime,
+ DefaultACRValues: c.DefaultACRValues,
+ InitiateLoginURI: p.parseURI(c.InitiateLoginURI, "initiate_login_uri"),
+ RequestURIs: p.parseURIs(c.RequestURIs, "request_uris"),
+ IDTokenResponseOptions: JWAOptions{
+ c.IDTokenSignedResponseAlg,
+ c.IDTokenEncryptedResponseAlg,
+ c.IDTokenEncryptedResponseEnc,
+ },
+ UserInfoResponseOptions: JWAOptions{
+ c.UserInfoSignedResponseAlg,
+ c.UserInfoEncryptedResponseAlg,
+ c.UserInfoEncryptedResponseEnc,
+ },
+ RequestObjectOptions: JWAOptions{
+ c.RequestObjectSigningAlg,
+ c.RequestObjectEncryptionAlg,
+ c.RequestObjectEncryptionEnc,
+ },
+ }
+ if p.firstErr != nil {
+ return ClientMetadata{}, p.firstErr
+ }
+ return m, nil
+}
+
+// stickyErrParser parses URIs and email addresses. Once it encounters
+// a parse error, subsequent calls become no-op.
+type stickyErrParser struct {
+ firstErr error
+}
+
+func (p *stickyErrParser) parseURI(s, field string) *url.URL {
+ if p.firstErr != nil || s == "" {
+ return nil
+ }
+ u, err := url.Parse(s)
+ if err == nil {
+ if u.Host == "" {
+ err = errors.New("no host in URI")
+ } else if u.Scheme != "http" && u.Scheme != "https" {
+ err = errors.New("invalid URI scheme")
+ }
+ }
+ if err != nil {
+ p.firstErr = fmt.Errorf("failed to parse %s: %v", field, err)
+ return nil
+ }
+ return u
+}
+
+func (p *stickyErrParser) parseURIs(s []string, field string) []url.URL {
+ if p.firstErr != nil || len(s) == 0 {
+ return nil
+ }
+ uris := make([]url.URL, len(s))
+ for i, val := range s {
+ if val == "" {
+ p.firstErr = fmt.Errorf("invalid URI in field %s", field)
+ return nil
+ }
+ if u := p.parseURI(val, field); u != nil {
+ uris[i] = *u
+ }
+ }
+ return uris
+}
+
+func (p *stickyErrParser) parseEmails(s []string, field string) []mail.Address {
+ if p.firstErr != nil || len(s) == 0 {
+ return nil
+ }
+ addrs := make([]mail.Address, len(s))
+ for i, addr := range s {
+ if addr == "" {
+ p.firstErr = fmt.Errorf("invalid email in field %s", field)
+ return nil
+ }
+ a, err := mail.ParseAddress(addr)
+ if err != nil {
+ p.firstErr = fmt.Errorf("invalid email in field %s: %v", field, err)
+ return nil
+ }
+ addrs[i] = *a
+ }
+ return addrs
+}
+
+func (m *ClientMetadata) toEncodableStruct() encodableClientMetadata {
+ return encodableClientMetadata{
+ RedirectURIs: urisToStrings(m.RedirectURIs),
+ ResponseTypes: m.ResponseTypes,
+ GrantTypes: m.GrantTypes,
+ ApplicationType: m.ApplicationType,
+ Contacts: emailsToStrings(m.Contacts),
+ ClientName: m.ClientName,
+ LogoURI: uriToString(m.LogoURI),
+ ClientURI: uriToString(m.ClientURI),
+ PolicyURI: uriToString(m.PolicyURI),
+ TermsOfServiceURI: uriToString(m.TermsOfServiceURI),
+ JWKSURI: uriToString(m.JWKSURI),
+ JWKS: m.JWKS,
+ SectorIdentifierURI: uriToString(m.SectorIdentifierURI),
+ SubjectType: m.SubjectType,
+ IDTokenSignedResponseAlg: m.IDTokenResponseOptions.SigningAlg,
+ IDTokenEncryptedResponseAlg: m.IDTokenResponseOptions.EncryptionAlg,
+ IDTokenEncryptedResponseEnc: m.IDTokenResponseOptions.EncryptionEnc,
+ UserInfoSignedResponseAlg: m.UserInfoResponseOptions.SigningAlg,
+ UserInfoEncryptedResponseAlg: m.UserInfoResponseOptions.EncryptionAlg,
+ UserInfoEncryptedResponseEnc: m.UserInfoResponseOptions.EncryptionEnc,
+ RequestObjectSigningAlg: m.RequestObjectOptions.SigningAlg,
+ RequestObjectEncryptionAlg: m.RequestObjectOptions.EncryptionAlg,
+ RequestObjectEncryptionEnc: m.RequestObjectOptions.EncryptionEnc,
+ TokenEndpointAuthMethod: m.TokenEndpointAuthMethod,
+ TokenEndpointAuthSigningAlg: m.TokenEndpointAuthSigningAlg,
+ DefaultMaxAge: m.DefaultMaxAge,
+ RequireAuthTime: m.RequireAuthTime,
+ DefaultACRValues: m.DefaultACRValues,
+ InitiateLoginURI: uriToString(m.InitiateLoginURI),
+ RequestURIs: urisToStrings(m.RequestURIs),
+ }
+}
+
+func uriToString(u *url.URL) string {
+ if u == nil {
+ return ""
+ }
+ return u.String()
+}
+
+func urisToStrings(urls []url.URL) []string {
+ if len(urls) == 0 {
+ return nil
+ }
+ sli := make([]string, len(urls))
+ for i, u := range urls {
+ sli[i] = u.String()
+ }
+ return sli
+}
+
+func emailsToStrings(addrs []mail.Address) []string {
+ if len(addrs) == 0 {
+ return nil
+ }
+ sli := make([]string, len(addrs))
+ for i, addr := range addrs {
+ sli[i] = addr.String()
+ }
+ return sli
+}
+
+// Valid determines if a ClientMetadata conforms with the OIDC specification.
+//
+// Valid is called by UnmarshalJSON.
+//
+// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
+// URLs fields where the OIDC spec requires it. This may change in future releases
+// of this package. See: https://github.com/coreos/go-oidc/issues/34
+func (m *ClientMetadata) Valid() error {
+ if len(m.RedirectURIs) == 0 {
+ return errors.New("zero redirect URLs")
+ }
+
+ validURI := func(u *url.URL, fieldName string) error {
+ if u.Host == "" {
+ return fmt.Errorf("no host for uri field %s", fieldName)
+ }
+ if u.Scheme != "http" && u.Scheme != "https" {
+ return fmt.Errorf("uri field %s scheme is not http or https", fieldName)
+ }
+ return nil
+ }
+
+ uris := []struct {
+ val *url.URL
+ name string
+ }{
+ {m.LogoURI, "logo_uri"},
+ {m.ClientURI, "client_uri"},
+ {m.PolicyURI, "policy_uri"},
+ {m.TermsOfServiceURI, "tos_uri"},
+ {m.JWKSURI, "jwks_uri"},
+ {m.SectorIdentifierURI, "sector_identifier_uri"},
+ {m.InitiateLoginURI, "initiate_login_uri"},
+ }
+
+ for _, uri := range uris {
+ if uri.val == nil {
+ continue
+ }
+ if err := validURI(uri.val, uri.name); err != nil {
+ return err
+ }
+ }
+
+ uriLists := []struct {
+ vals []url.URL
+ name string
+ }{
+ {m.RedirectURIs, "redirect_uris"},
+ {m.RequestURIs, "request_uris"},
+ }
+ for _, list := range uriLists {
+ for _, uri := range list.vals {
+ if err := validURI(&uri, list.name); err != nil {
+ return err
+ }
+ }
+ }
+
+ options := []struct {
+ option JWAOptions
+ name string
+ }{
+ {m.IDTokenResponseOptions, "id_token response"},
+ {m.UserInfoResponseOptions, "userinfo response"},
+ {m.RequestObjectOptions, "request_object"},
+ }
+ for _, option := range options {
+ if err := option.option.valid(); err != nil {
+ return fmt.Errorf("invalid JWA values for %s: %v", option.name, err)
+ }
+ }
+ return nil
+}
+
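+// ClientRegistrationResponse holds the fields returned by an OIDC dynamic client
+// registration endpoint, embedding the registered ClientMetadata.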
+type ClientRegistrationResponse struct {
+ ClientID string // Required
+ ClientSecret string
+ RegistrationAccessToken string
+ RegistrationClientURI string
+ // If IsZero is true, unspecified.
+ ClientIDIssuedAt time.Time
+ // Time at which the client_secret will expire.
+ // If IsZero is true, it will not expire.
+ ClientSecretExpiresAt time.Time
+
+ ClientMetadata
+}
+
+type encodableClientRegistrationResponse struct {
+ ClientID string `json:"client_id"` // Required
+ ClientSecret string `json:"client_secret,omitempty"`
+ RegistrationAccessToken string `json:"registration_access_token,omitempty"`
+ RegistrationClientURI string `json:"registration_client_uri,omitempty"`
+ ClientIDIssuedAt int64 `json:"client_id_issued_at,omitempty"`
+ // Time at which the client_secret will expire, in seconds since the epoch.
+ // If 0 it will not expire.
+ ClientSecretExpiresAt int64 `json:"client_secret_expires_at"` // Required
+
+ encodableClientMetadata
+}
+
+func unixToSec(t time.Time) int64 {
+ if t.IsZero() {
+ return 0
+ }
+ return t.Unix()
+}
+
+func (c *ClientRegistrationResponse) MarshalJSON() ([]byte, error) {
+ e := encodableClientRegistrationResponse{
+ ClientID: c.ClientID,
+ ClientSecret: c.ClientSecret,
+ RegistrationAccessToken: c.RegistrationAccessToken,
+ RegistrationClientURI: c.RegistrationClientURI,
+ ClientIDIssuedAt: unixToSec(c.ClientIDIssuedAt),
+ ClientSecretExpiresAt: unixToSec(c.ClientSecretExpiresAt),
+ encodableClientMetadata: c.ClientMetadata.toEncodableStruct(),
+ }
+ return json.Marshal(&e)
+}
+
+func secToUnix(sec int64) time.Time {
+ if sec == 0 {
+ return time.Time{}
+ }
+ return time.Unix(sec, 0)
+}
+
+func (c *ClientRegistrationResponse) UnmarshalJSON(data []byte) error {
+ var e encodableClientRegistrationResponse
+ if err := json.Unmarshal(data, &e); err != nil {
+ return err
+ }
+ if e.ClientID == "" {
+ return errors.New("no client_id in client registration response")
+ }
+ metadata, err := e.encodableClientMetadata.toStruct()
+ if err != nil {
+ return err
+ }
+ *c = ClientRegistrationResponse{
+ ClientID: e.ClientID,
+ ClientSecret: e.ClientSecret,
+ RegistrationAccessToken: e.RegistrationAccessToken,
+ RegistrationClientURI: e.RegistrationClientURI,
+ ClientIDIssuedAt: secToUnix(e.ClientIDIssuedAt),
+ ClientSecretExpiresAt: secToUnix(e.ClientSecretExpiresAt),
+ ClientMetadata: metadata,
+ }
+ return nil
+}
+
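+// ClientConfig holds the options used by NewClient to construct a Client.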
+type ClientConfig struct {
+ HTTPClient phttp.Client
+ Credentials ClientCredentials
+ Scope []string
+ RedirectURL string
+ ProviderConfig ProviderConfig
+ KeySet key.PublicKeySet
+}
+
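+// NewClient constructs a Client from the given config. If no HTTP client is
+// provided http.DefaultClient is used, and if no scope list is provided the
+// client falls back to DefaultScope.
+//
+// A minimal sketch of the authorization code flow (cfg, issuerURL, and code
+// are illustrative placeholders):
+//
+// client, err := oidc.NewClient(cfg)
+// if err != nil {
+// // handle error
+// }
+// stop := client.SyncProviderConfig(issuerURL)
+// defer close(stop)
+// jwt, err := client.ExchangeAuthCode(code)
+//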
+func NewClient(cfg ClientConfig) (*Client, error) {
+ // Allow empty redirect URL in the case where the client
+ // only needs to verify a given token.
+ ru, err := url.Parse(cfg.RedirectURL)
+ if err != nil {
+ return nil, fmt.Errorf("invalid redirect URL: %v", err)
+ }
+
+ c := Client{
+ credentials: cfg.Credentials,
+ httpClient: cfg.HTTPClient,
+ scope: cfg.Scope,
+ redirectURL: ru.String(),
+ providerConfig: newProviderConfigRepo(cfg.ProviderConfig),
+ keySet: cfg.KeySet,
+ }
+
+ if c.httpClient == nil {
+ c.httpClient = http.DefaultClient
+ }
+
+ if c.scope == nil {
+ c.scope = make([]string, len(DefaultScope))
+ copy(c.scope, DefaultScope)
+ }
+
+ return &c, nil
+}
+
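+// Client is an OpenID Connect relying party client. It wraps an OAuth2 client
+// and verifies ID tokens against the provider's published key set.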
+type Client struct {
+ httpClient phttp.Client
+ providerConfig *providerConfigRepo
+ credentials ClientCredentials
+ redirectURL string
+ scope []string
+ keySet key.PublicKeySet
+ providerSyncer *ProviderConfigSyncer
+
+ keySetSyncMutex sync.RWMutex
+ lastKeySetSync time.Time
+}
+
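+// Healthy reports whether the client currently holds a non-empty, unexpired
+// provider config.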
+func (c *Client) Healthy() error {
+ now := time.Now().UTC()
+
+ cfg := c.providerConfig.Get()
+
+ if cfg.Empty() {
+ return errors.New("oidc client provider config empty")
+ }
+
+ if !cfg.ExpiresAt.IsZero() && cfg.ExpiresAt.Before(now) {
+ return errors.New("oidc client provider config expired")
+ }
+
+ return nil
+}
+
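+// OAuthClient builds an oauth2.Client from the current provider config, the
+// client credentials, and a token endpoint auth method supported by both sides.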
+func (c *Client) OAuthClient() (*oauth2.Client, error) {
+ cfg := c.providerConfig.Get()
+ authMethod, err := chooseAuthMethod(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ ocfg := oauth2.Config{
+ Credentials: oauth2.ClientCredentials(c.credentials),
+ RedirectURL: c.redirectURL,
+ AuthURL: cfg.AuthEndpoint.String(),
+ TokenURL: cfg.TokenEndpoint.String(),
+ Scope: c.scope,
+ AuthMethod: authMethod,
+ }
+
+ return oauth2.NewClient(c.httpClient, ocfg)
+}
+
+func chooseAuthMethod(cfg ProviderConfig) (string, error) {
+ if len(cfg.TokenEndpointAuthMethodsSupported) == 0 {
+ return oauth2.AuthMethodClientSecretBasic, nil
+ }
+
+ for _, authMethod := range cfg.TokenEndpointAuthMethodsSupported {
+ if _, ok := supportedAuthMethods[authMethod]; ok {
+ return authMethod, nil
+ }
+ }
+
+ return "", errors.New("no supported auth methods")
+}
+
+// SyncProviderConfig starts the provider config syncer, blocks until the initial
+// sync has completed, and returns a channel that stops the syncer when closed.
+func (c *Client) SyncProviderConfig(discoveryURL string) chan struct{} {
+ r := NewHTTPProviderConfigGetter(c.httpClient, discoveryURL)
+ s := NewProviderConfigSyncer(r, c.providerConfig)
+ stop := s.Run()
+ s.WaitUntilInitialSync()
+ return stop
+}
+
+func (c *Client) maybeSyncKeys() error {
+ tooSoon := func() bool {
+ return time.Now().UTC().Before(c.lastKeySetSync.Add(keySyncWindow))
+ }
+
+ // ignore request to sync keys if a sync operation has been
+ // attempted too recently
+ if tooSoon() {
+ return nil
+ }
+
+ c.keySetSyncMutex.Lock()
+ defer c.keySetSyncMutex.Unlock()
+
+ // check again, as another goroutine may have been holding
+ // the lock while updating the keys
+ if tooSoon() {
+ return nil
+ }
+
+ cfg := c.providerConfig.Get()
+ r := NewRemotePublicKeyRepo(c.httpClient, cfg.KeysEndpoint.String())
+ w := &clientKeyRepo{client: c}
+ _, err := key.Sync(r, w)
+ c.lastKeySetSync = time.Now().UTC()
+
+ return err
+}
+
+type clientKeyRepo struct {
+ client *Client
+}
+
+func (r *clientKeyRepo) Set(ks key.KeySet) error {
+ pks, ok := ks.(*key.PublicKeySet)
+ if !ok {
+ return errors.New("unable to cast to PublicKey")
+ }
+ r.client.keySet = *pks
+ return nil
+}
+
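+// ClientCredsToken requests an ID token using the OAuth2 client credentials
+// grant and verifies the returned JWT before handing it back.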
+func (c *Client) ClientCredsToken(scope []string) (jose.JWT, error) {
+ cfg := c.providerConfig.Get()
+
+ if !cfg.SupportsGrantType(oauth2.GrantTypeClientCreds) {
+ return jose.JWT{}, fmt.Errorf("%v grant type is not supported", oauth2.GrantTypeClientCreds)
+ }
+
+ oac, err := c.OAuthClient()
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ t, err := oac.ClientCredsToken(scope)
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ jwt, err := jose.ParseJWT(t.IDToken)
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ return jwt, c.VerifyJWT(jwt)
+}
+
+// ExchangeAuthCode exchanges an OAuth2 auth code for an OIDC JWT ID token.
+func (c *Client) ExchangeAuthCode(code string) (jose.JWT, error) {
+ oac, err := c.OAuthClient()
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ t, err := oac.RequestToken(oauth2.GrantTypeAuthCode, code)
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ jwt, err := jose.ParseJWT(t.IDToken)
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ return jwt, c.VerifyJWT(jwt)
+}
+
+// RefreshToken uses a refresh token to exchange for a new OIDC JWT ID Token.
+func (c *Client) RefreshToken(refreshToken string) (jose.JWT, error) {
+ oac, err := c.OAuthClient()
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ t, err := oac.RequestToken(oauth2.GrantTypeRefreshToken, refreshToken)
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ jwt, err := jose.ParseJWT(t.IDToken)
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ return jwt, c.VerifyJWT(jwt)
+}
+
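+// VerifyJWT verifies the signature and claims of the given JWT against the
+// client's key set, syncing keys from the provider when needed.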
+func (c *Client) VerifyJWT(jwt jose.JWT) error {
+ var keysFunc func() []key.PublicKey
+ if kID, ok := jwt.KeyID(); ok {
+ keysFunc = c.keysFuncWithID(kID)
+ } else {
+ keysFunc = c.keysFuncAll()
+ }
+
+ v := NewJWTVerifier(
+ c.providerConfig.Get().Issuer.String(),
+ c.credentials.ID,
+ c.maybeSyncKeys, keysFunc)
+
+ return v.Verify(jwt)
+}
+
+// keysFuncWithID returns a function that retrieves at most one unexpired
+// public key from the Client that matches the provided ID.
+func (c *Client) keysFuncWithID(kID string) func() []key.PublicKey {
+ return func() []key.PublicKey {
+ c.keySetSyncMutex.RLock()
+ defer c.keySetSyncMutex.RUnlock()
+
+ if c.keySet.ExpiresAt().Before(time.Now()) {
+ return []key.PublicKey{}
+ }
+
+ k := c.keySet.Key(kID)
+ if k == nil {
+ return []key.PublicKey{}
+ }
+
+ return []key.PublicKey{*k}
+ }
+}
+
+// keysFuncAll returns a function that retrieves all unexpired public
+// keys from the Client
+func (c *Client) keysFuncAll() func() []key.PublicKey {
+ return func() []key.PublicKey {
+ c.keySetSyncMutex.RLock()
+ defer c.keySetSyncMutex.RUnlock()
+
+ if c.keySet.ExpiresAt().Before(time.Now()) {
+ return []key.PublicKey{}
+ }
+
+ return c.keySet.Keys()
+ }
+}
+
+type providerConfigRepo struct {
+ mu sync.RWMutex
+ config ProviderConfig // do not access directly, use Get()
+}
+
+func newProviderConfigRepo(pc ProviderConfig) *providerConfigRepo {
+ return &providerConfigRepo{sync.RWMutex{}, pc}
+}
+
+// Set stores the provider config; the error return exists only to implement ProviderConfigSetter.
+func (r *providerConfigRepo) Set(cfg ProviderConfig) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.config = cfg
+ return nil
+}
+
+func (r *providerConfigRepo) Get() ProviderConfig {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ return r.config
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/identity.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/identity.go
new file mode 100644
index 0000000..9bfa8e3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/identity.go
@@ -0,0 +1,44 @@
+package oidc
+
+import (
+ "errors"
+ "time"
+
+ "github.com/coreos/go-oidc/jose"
+)
+
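+// Identity is the subset of standard ID token claims used to identify an end user.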
+type Identity struct {
+ ID string
+ Name string
+ Email string
+ ExpiresAt time.Time
+}
+
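+// IdentityFromClaims builds an Identity from a set of ID token claims. The
+// "sub" claim is required; "email" and "exp" are used when present.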
+func IdentityFromClaims(claims jose.Claims) (*Identity, error) {
+ if claims == nil {
+ return nil, errors.New("nil claim set")
+ }
+
+ var ident Identity
+ var err error
+ var ok bool
+
+ if ident.ID, ok, err = claims.StringClaim("sub"); err != nil {
+ return nil, err
+ } else if !ok {
+ return nil, errors.New("missing required claim: sub")
+ }
+
+ if ident.Email, _, err = claims.StringClaim("email"); err != nil {
+ return nil, err
+ }
+
+ exp, ok, err := claims.TimeClaim("exp")
+ if err != nil {
+ return nil, err
+ } else if ok {
+ ident.ExpiresAt = exp
+ }
+
+ return &ident, nil
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/interface.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/interface.go
new file mode 100644
index 0000000..248cac0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/interface.go
@@ -0,0 +1,3 @@
+package oidc
+
+type LoginFunc func(ident Identity, sessionKey string) (redirectURL string, err error)
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/key.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/key.go
new file mode 100644
index 0000000..82a0f56
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/key.go
@@ -0,0 +1,67 @@
+package oidc
+
+import (
+ "encoding/json"
+ "errors"
+ "net/http"
+ "time"
+
+ phttp "github.com/coreos/go-oidc/http"
+ "github.com/coreos/go-oidc/jose"
+ "github.com/coreos/go-oidc/key"
+)
+
+// DefaultPublicKeySetTTL is the default TTL set on the PublicKeySet if no
+// Cache-Control header is provided by the JWK Set document endpoint.
+const DefaultPublicKeySetTTL = 24 * time.Hour
+
+// NewRemotePublicKeyRepo returns a repository responsible for fetching the JWK Set
+// document from the given endpoint.
+func NewRemotePublicKeyRepo(hc phttp.Client, ep string) *remotePublicKeyRepo {
+ return &remotePublicKeyRepo{hc: hc, ep: ep}
+}
+
+type remotePublicKeyRepo struct {
+ hc phttp.Client
+ ep string
+}
+
+// Get returns a PublicKeySet fetched from the JWK Set document endpoint. A TTL
+// is set on the Key Set to avoid it having to be re-retrieved for every
+// verification event. This TTL is typically controlled by the endpoint returning
+// a Cache-Control header, but defaults to 24 hours if no Cache-Control header
+// is found.
+func (r *remotePublicKeyRepo) Get() (key.KeySet, error) {
+ req, err := http.NewRequest("GET", r.ep, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := r.hc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var d struct {
+ Keys []jose.JWK `json:"keys"`
+ }
+ if err := json.NewDecoder(resp.Body).Decode(&d); err != nil {
+ return nil, err
+ }
+
+ if len(d.Keys) == 0 {
+ return nil, errors.New("zero keys in response")
+ }
+
+ ttl, ok, err := phttp.Cacheable(resp.Header)
+ if err != nil {
+ return nil, err
+ }
+ if !ok {
+ ttl = DefaultPublicKeySetTTL
+ }
+
+ exp := time.Now().UTC().Add(ttl)
+ ks := key.NewPublicKeySet(d.Keys, exp)
+ return ks, nil
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/provider.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/provider.go
new file mode 100644
index 0000000..1235890
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/provider.go
@@ -0,0 +1,688 @@
+package oidc
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/coreos/pkg/capnslog"
+ "github.com/coreos/pkg/timeutil"
+ "github.com/jonboulle/clockwork"
+
+ phttp "github.com/coreos/go-oidc/http"
+ "github.com/coreos/go-oidc/oauth2"
+)
+
+var (
+ log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "http")
+)
+
+const (
+ // Subject Identifier types defined by the OIDC spec. Specifies if the provider
+ // should provide the same sub claim value to all clients (public) or a unique
+ // value for each client (pairwise).
+ //
+ // See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
+ SubjectTypePublic = "public"
+ SubjectTypePairwise = "pairwise"
+)
+
+var (
+ // Default values for omitted provider config fields.
+ //
+ // Use ProviderConfig's Defaults method to fill a provider config with these values.
+ DefaultGrantTypesSupported = []string{oauth2.GrantTypeAuthCode, oauth2.GrantTypeImplicit}
+ DefaultResponseModesSupported = []string{"query", "fragment"}
+ DefaultTokenEndpointAuthMethodsSupported = []string{oauth2.AuthMethodClientSecretBasic}
+ DefaultClaimTypesSupported = []string{"normal"}
+)
+
+const (
+ MaximumProviderConfigSyncInterval = 24 * time.Hour
+ MinimumProviderConfigSyncInterval = time.Minute
+
+ discoveryConfigPath = "/.well-known/openid-configuration"
+)
+
+// internally configurable for tests
+var minimumProviderConfigSyncInterval = MinimumProviderConfigSyncInterval
+
+var (
+ // Ensure ProviderConfig satisfies these interfaces.
+ _ json.Marshaler = &ProviderConfig{}
+ _ json.Unmarshaler = &ProviderConfig{}
+)
+
+// ProviderConfig represents the OpenID Provider Metadata specifying what
+// configurations a provider supports.
+//
+// See: http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
+type ProviderConfig struct {
+ Issuer *url.URL // Required
+ AuthEndpoint *url.URL // Required
+ TokenEndpoint *url.URL // Required if grant types other than "implicit" are supported
+ UserInfoEndpoint *url.URL
+ KeysEndpoint *url.URL // Required
+ RegistrationEndpoint *url.URL
+
+ // Servers MAY choose not to advertise some supported scope values even when this
+ // parameter is used, although those defined in OpenID Core SHOULD be listed, if supported.
+ ScopesSupported []string
+ // OAuth2.0 response types supported.
+ ResponseTypesSupported []string // Required
+ // OAuth2.0 response modes supported.
+ //
+ // If omitted, defaults to DefaultResponseModesSupported.
+ ResponseModesSupported []string
+ // OAuth2.0 grant types supported.
+ //
+ // If omitted, defaults to DefaultGrantTypesSupported.
+ GrantTypesSupported []string
+ ACRValuesSupported []string
+ // SubjectTypesSupported specifies strategies for providing values for the sub claim.
+ SubjectTypesSupported []string // Required
+
+ // JWA signing and encryption algorithm values supported for ID tokens.
+ IDTokenSigningAlgValues []string // Required
+ IDTokenEncryptionAlgValues []string
+ IDTokenEncryptionEncValues []string
+
+ // JWA signing and encryption algorithm values supported for user info responses.
+ UserInfoSigningAlgValues []string
+ UserInfoEncryptionAlgValues []string
+ UserInfoEncryptionEncValues []string
+
+ // JWA signing and encryption algorithm values supported for request objects.
+ ReqObjSigningAlgValues []string
+ ReqObjEncryptionAlgValues []string
+ ReqObjEncryptionEncValues []string
+
+ TokenEndpointAuthMethodsSupported []string
+ TokenEndpointAuthSigningAlgValuesSupported []string
+ DisplayValuesSupported []string
+ ClaimTypesSupported []string
+ ClaimsSupported []string
+ ServiceDocs *url.URL
+ ClaimsLocalsSupported []string
+ UILocalsSupported []string
+ ClaimsParameterSupported bool
+ RequestParameterSupported bool
+ RequestURIParamaterSupported bool
+ RequireRequestURIRegistration bool
+
+ Policy *url.URL
+ TermsOfService *url.URL
+
+ // Not part of the OpenID Provider Metadata
+ ExpiresAt time.Time
+}
+
+// Defaults returns a shallow copy of ProviderConfig with default
+// values replacing omitted fields.
+//
+// var cfg oidc.ProviderConfig
+// // Fill provider config with default values for omitted fields.
+// cfg = cfg.Defaults()
+//
+func (p ProviderConfig) Defaults() ProviderConfig {
+ setDefault := func(val *[]string, defaultVal []string) {
+ if len(*val) == 0 {
+ *val = defaultVal
+ }
+ }
+ setDefault(&p.GrantTypesSupported, DefaultGrantTypesSupported)
+ setDefault(&p.ResponseModesSupported, DefaultResponseModesSupported)
+ setDefault(&p.TokenEndpointAuthMethodsSupported, DefaultTokenEndpointAuthMethodsSupported)
+ setDefault(&p.ClaimTypesSupported, DefaultClaimTypesSupported)
+ return p
+}
+
+func (p *ProviderConfig) MarshalJSON() ([]byte, error) {
+ e := p.toEncodableStruct()
+ return json.Marshal(&e)
+}
+
+func (p *ProviderConfig) UnmarshalJSON(data []byte) error {
+ var e encodableProviderConfig
+ if err := json.Unmarshal(data, &e); err != nil {
+ return err
+ }
+ conf, err := e.toStruct()
+ if err != nil {
+ return err
+ }
+ if err := conf.Valid(); err != nil {
+ return err
+ }
+ *p = conf
+ return nil
+}
+
+type encodableProviderConfig struct {
+ Issuer string `json:"issuer"`
+ AuthEndpoint string `json:"authorization_endpoint"`
+ TokenEndpoint string `json:"token_endpoint"`
+ UserInfoEndpoint string `json:"userinfo_endpoint,omitempty"`
+ KeysEndpoint string `json:"jwks_uri"`
+ RegistrationEndpoint string `json:"registration_endpoint,omitempty"`
+
+ // Use 'omitempty' for all slices as per OIDC spec:
+ // "Claims that return multiple values are represented as JSON arrays.
+ // Claims with zero elements MUST be omitted from the response."
+ // http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse
+
+ ScopesSupported []string `json:"scopes_supported,omitempty"`
+ ResponseTypesSupported []string `json:"response_types_supported,omitempty"`
+ ResponseModesSupported []string `json:"response_modes_supported,omitempty"`
+ GrantTypesSupported []string `json:"grant_types_supported,omitempty"`
+ ACRValuesSupported []string `json:"acr_values_supported,omitempty"`
+ SubjectTypesSupported []string `json:"subject_types_supported,omitempty"`
+
+ IDTokenSigningAlgValues []string `json:"id_token_signing_alg_values_supported,omitempty"`
+ IDTokenEncryptionAlgValues []string `json:"id_token_encryption_alg_values_supported,omitempty"`
+ IDTokenEncryptionEncValues []string `json:"id_token_encryption_enc_values_supported,omitempty"`
+ UserInfoSigningAlgValues []string `json:"userinfo_signing_alg_values_supported,omitempty"`
+ UserInfoEncryptionAlgValues []string `json:"userinfo_encryption_alg_values_supported,omitempty"`
+ UserInfoEncryptionEncValues []string `json:"userinfo_encryption_enc_values_supported,omitempty"`
+ ReqObjSigningAlgValues []string `json:"request_object_signing_alg_values_supported,omitempty"`
+ ReqObjEncryptionAlgValues []string `json:"request_object_encryption_alg_values_supported,omitempty"`
+ ReqObjEncryptionEncValues []string `json:"request_object_encryption_enc_values_supported,omitempty"`
+
+ TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported,omitempty"`
+ TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported,omitempty"`
+
+ DisplayValuesSupported []string `json:"display_values_supported,omitempty"`
+ ClaimTypesSupported []string `json:"claim_types_supported,omitempty"`
+ ClaimsSupported []string `json:"claims_supported,omitempty"`
+ ServiceDocs string `json:"service_documentation,omitempty"`
+ ClaimsLocalsSupported []string `json:"claims_locales_supported,omitempty"`
+ UILocalsSupported []string `json:"ui_locales_supported,omitempty"`
+ ClaimsParameterSupported bool `json:"claims_parameter_supported,omitempty"`
+ RequestParameterSupported bool `json:"request_parameter_supported,omitempty"`
+ RequestURIParamaterSupported bool `json:"request_uri_parameter_supported,omitempty"`
+ RequireRequestURIRegistration bool `json:"require_request_uri_registration,omitempty"`
+
+ Policy string `json:"op_policy_uri,omitempty"`
+ TermsOfService string `json:"op_tos_uri,omitempty"`
+}
+
+func (cfg ProviderConfig) toEncodableStruct() encodableProviderConfig {
+ return encodableProviderConfig{
+ Issuer: uriToString(cfg.Issuer),
+ AuthEndpoint: uriToString(cfg.AuthEndpoint),
+ TokenEndpoint: uriToString(cfg.TokenEndpoint),
+ UserInfoEndpoint: uriToString(cfg.UserInfoEndpoint),
+ KeysEndpoint: uriToString(cfg.KeysEndpoint),
+ RegistrationEndpoint: uriToString(cfg.RegistrationEndpoint),
+ ScopesSupported: cfg.ScopesSupported,
+ ResponseTypesSupported: cfg.ResponseTypesSupported,
+ ResponseModesSupported: cfg.ResponseModesSupported,
+ GrantTypesSupported: cfg.GrantTypesSupported,
+ ACRValuesSupported: cfg.ACRValuesSupported,
+ SubjectTypesSupported: cfg.SubjectTypesSupported,
+ IDTokenSigningAlgValues: cfg.IDTokenSigningAlgValues,
+ IDTokenEncryptionAlgValues: cfg.IDTokenEncryptionAlgValues,
+ IDTokenEncryptionEncValues: cfg.IDTokenEncryptionEncValues,
+ UserInfoSigningAlgValues: cfg.UserInfoSigningAlgValues,
+ UserInfoEncryptionAlgValues: cfg.UserInfoEncryptionAlgValues,
+ UserInfoEncryptionEncValues: cfg.UserInfoEncryptionEncValues,
+ ReqObjSigningAlgValues: cfg.ReqObjSigningAlgValues,
+ ReqObjEncryptionAlgValues: cfg.ReqObjEncryptionAlgValues,
+ ReqObjEncryptionEncValues: cfg.ReqObjEncryptionEncValues,
+ TokenEndpointAuthMethodsSupported: cfg.TokenEndpointAuthMethodsSupported,
+ TokenEndpointAuthSigningAlgValuesSupported: cfg.TokenEndpointAuthSigningAlgValuesSupported,
+ DisplayValuesSupported: cfg.DisplayValuesSupported,
+ ClaimTypesSupported: cfg.ClaimTypesSupported,
+ ClaimsSupported: cfg.ClaimsSupported,
+ ServiceDocs: uriToString(cfg.ServiceDocs),
+ ClaimsLocalsSupported: cfg.ClaimsLocalsSupported,
+ UILocalsSupported: cfg.UILocalsSupported,
+ ClaimsParameterSupported: cfg.ClaimsParameterSupported,
+ RequestParameterSupported: cfg.RequestParameterSupported,
+ RequestURIParamaterSupported: cfg.RequestURIParamaterSupported,
+ RequireRequestURIRegistration: cfg.RequireRequestURIRegistration,
+ Policy: uriToString(cfg.Policy),
+ TermsOfService: uriToString(cfg.TermsOfService),
+ }
+}
+
+func (e encodableProviderConfig) toStruct() (ProviderConfig, error) {
+ p := stickyErrParser{}
+ conf := ProviderConfig{
+ Issuer: p.parseURI(e.Issuer, "issuer"),
+ AuthEndpoint: p.parseURI(e.AuthEndpoint, "authorization_endpoint"),
+ TokenEndpoint: p.parseURI(e.TokenEndpoint, "token_endpoint"),
+ UserInfoEndpoint: p.parseURI(e.UserInfoEndpoint, "userinfo_endpoint"),
+ KeysEndpoint: p.parseURI(e.KeysEndpoint, "jwks_uri"),
+ RegistrationEndpoint: p.parseURI(e.RegistrationEndpoint, "registration_endpoint"),
+ ScopesSupported: e.ScopesSupported,
+ ResponseTypesSupported: e.ResponseTypesSupported,
+ ResponseModesSupported: e.ResponseModesSupported,
+ GrantTypesSupported: e.GrantTypesSupported,
+ ACRValuesSupported: e.ACRValuesSupported,
+ SubjectTypesSupported: e.SubjectTypesSupported,
+ IDTokenSigningAlgValues: e.IDTokenSigningAlgValues,
+ IDTokenEncryptionAlgValues: e.IDTokenEncryptionAlgValues,
+ IDTokenEncryptionEncValues: e.IDTokenEncryptionEncValues,
+ UserInfoSigningAlgValues: e.UserInfoSigningAlgValues,
+ UserInfoEncryptionAlgValues: e.UserInfoEncryptionAlgValues,
+ UserInfoEncryptionEncValues: e.UserInfoEncryptionEncValues,
+ ReqObjSigningAlgValues: e.ReqObjSigningAlgValues,
+ ReqObjEncryptionAlgValues: e.ReqObjEncryptionAlgValues,
+ ReqObjEncryptionEncValues: e.ReqObjEncryptionEncValues,
+ TokenEndpointAuthMethodsSupported: e.TokenEndpointAuthMethodsSupported,
+ TokenEndpointAuthSigningAlgValuesSupported: e.TokenEndpointAuthSigningAlgValuesSupported,
+ DisplayValuesSupported: e.DisplayValuesSupported,
+ ClaimTypesSupported: e.ClaimTypesSupported,
+ ClaimsSupported: e.ClaimsSupported,
+ ServiceDocs: p.parseURI(e.ServiceDocs, "service_documentation"),
+ ClaimsLocalsSupported: e.ClaimsLocalsSupported,
+ UILocalsSupported: e.UILocalsSupported,
+ ClaimsParameterSupported: e.ClaimsParameterSupported,
+ RequestParameterSupported: e.RequestParameterSupported,
+ RequestURIParamaterSupported: e.RequestURIParamaterSupported,
+ RequireRequestURIRegistration: e.RequireRequestURIRegistration,
+ Policy: p.parseURI(e.Policy, "op_policy_uri"),
+ TermsOfService: p.parseURI(e.TermsOfService, "op_tos_uri"),
+ }
+ if p.firstErr != nil {
+ return ProviderConfig{}, p.firstErr
+ }
+ return conf, nil
+}
+
+// Empty returns true if the ProviderConfig holds no information.
+//
+// This case generally indicates a ProviderConfigGetter has experienced an error
+// and has nothing to report.
+func (p ProviderConfig) Empty() bool {
+ return p.Issuer == nil
+}
+
+func contains(sli []string, ele string) bool {
+ for _, s := range sli {
+ if s == ele {
+ return true
+ }
+ }
+ return false
+}
+
+// Valid determines if a ProviderConfig conforms with the OIDC specification.
+// If Valid returns successfully it guarantees required fields are non-nil and
+// URLs are well formed.
+//
+// Valid is called by UnmarshalJSON.
+//
+// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
+// URLs fields where the OIDC spec requires it. This may change in future releases
+// of this package. See: https://github.com/coreos/go-oidc/issues/34
+func (p ProviderConfig) Valid() error {
+ grantTypes := p.GrantTypesSupported
+ if len(grantTypes) == 0 {
+ grantTypes = DefaultGrantTypesSupported
+ }
+ implicitOnly := true
+ for _, grantType := range grantTypes {
+ if grantType != oauth2.GrantTypeImplicit {
+ implicitOnly = false
+ break
+ }
+ }
+
+ if len(p.SubjectTypesSupported) == 0 {
+ return errors.New("missing required field subject_types_supported")
+ }
+ if len(p.IDTokenSigningAlgValues) == 0 {
+ return errors.New("missing required field id_token_signing_alg_values_supported")
+ }
+
+ if len(p.ScopesSupported) != 0 && !contains(p.ScopesSupported, "openid") {
+ return errors.New("scoped_supported must be unspecified or include 'openid'")
+ }
+
+ if !contains(p.IDTokenSigningAlgValues, "RS256") {
+ return errors.New("id_token_signing_alg_values_supported must include 'RS256'")
+ }
+ if contains(p.TokenEndpointAuthMethodsSupported, "none") {
+ return errors.New("token_endpoint_auth_signing_alg_values_supported cannot include 'none'")
+ }
+
+ uris := []struct {
+ val *url.URL
+ name string
+ required bool
+ }{
+ {p.Issuer, "issuer", true},
+ {p.AuthEndpoint, "authorization_endpoint", true},
+ {p.TokenEndpoint, "token_endpoint", !implicitOnly},
+ {p.UserInfoEndpoint, "userinfo_endpoint", false},
+ {p.KeysEndpoint, "jwks_uri", true},
+ {p.RegistrationEndpoint, "registration_endpoint", false},
+ {p.ServiceDocs, "service_documentation", false},
+ {p.Policy, "op_policy_uri", false},
+ {p.TermsOfService, "op_tos_uri", false},
+ }
+
+ for _, uri := range uris {
+ if uri.val == nil {
+ if !uri.required {
+ continue
+ }
+ return fmt.Errorf("empty value for required uri field %s", uri.name)
+ }
+ if uri.val.Host == "" {
+ return fmt.Errorf("no host for uri field %s", uri.name)
+ }
+ if uri.val.Scheme != "http" && uri.val.Scheme != "https" {
+ return fmt.Errorf("uri field %s schemeis not http or https", uri.name)
+ }
+ }
+ return nil
+}
+
+// Supports determines if provider supports a client given their respective metadata.
+func (p ProviderConfig) Supports(c ClientMetadata) error {
+ if err := p.Valid(); err != nil {
+ return fmt.Errorf("invalid provider config: %v", err)
+ }
+ if err := c.Valid(); err != nil {
+ return fmt.Errorf("invalid client config: %v", err)
+ }
+
+ // Fill default values for omitted fields
+ c = c.Defaults()
+ p = p.Defaults()
+
+ // Does each list of supported values include the requested value?
+ supports := []struct {
+ supported []string
+ requested string
+ name string
+ }{
+ {p.IDTokenSigningAlgValues, c.IDTokenResponseOptions.SigningAlg, "id_token_signed_response_alg"},
+ {p.IDTokenEncryptionAlgValues, c.IDTokenResponseOptions.EncryptionAlg, "id_token_encryption_response_alg"},
+ {p.IDTokenEncryptionEncValues, c.IDTokenResponseOptions.EncryptionEnc, "id_token_encryption_response_enc"},
+ {p.UserInfoSigningAlgValues, c.UserInfoResponseOptions.SigningAlg, "userinfo_signed_response_alg"},
+ {p.UserInfoEncryptionAlgValues, c.UserInfoResponseOptions.EncryptionAlg, "userinfo_encryption_response_alg"},
+ {p.UserInfoEncryptionEncValues, c.UserInfoResponseOptions.EncryptionEnc, "userinfo_encryption_response_enc"},
+ {p.ReqObjSigningAlgValues, c.RequestObjectOptions.SigningAlg, "request_object_signing_alg"},
+ {p.ReqObjEncryptionAlgValues, c.RequestObjectOptions.EncryptionAlg, "request_object_encryption_alg"},
+ {p.ReqObjEncryptionEncValues, c.RequestObjectOptions.EncryptionEnc, "request_object_encryption_enc"},
+ }
+ for _, field := range supports {
+ if field.requested == "" {
+ continue
+ }
+ if !contains(field.supported, field.requested) {
+ return fmt.Errorf("provider does not support requested value for field %s", field.name)
+ }
+ }
+
+ stringsEqual := func(s1, s2 string) bool { return s1 == s2 }
+
+ // For lists, is the list of requested values a subset of the supported ones?
+ supportsAll := []struct {
+ supported []string
+ requested []string
+ name string
+ // OAuth2.0 response_type can be space separated lists where order doesn't matter.
+ // For example "id_token token" is the same as "token id_token"
+ // Support a custom compare method.
+ comp func(s1, s2 string) bool
+ }{
+ {p.GrantTypesSupported, c.GrantTypes, "grant_types", stringsEqual},
+ {p.ResponseTypesSupported, c.ResponseTypes, "response_type", oauth2.ResponseTypesEqual},
+ }
+ for _, field := range supportsAll {
+ requestLoop:
+ for _, req := range field.requested {
+ for _, sup := range field.supported {
+ if field.comp(req, sup) {
+ continue requestLoop
+ }
+ }
+ return fmt.Errorf("provider does not support requested value for field %s", field.name)
+ }
+ }
+
+ // TODO(ericchiang): Are there more checks we feel comfortable with being strict about?
+
+ return nil
+}
+
+func (p ProviderConfig) SupportsGrantType(grantType string) bool {
+ var supported []string
+ if len(p.GrantTypesSupported) == 0 {
+ supported = DefaultGrantTypesSupported
+ } else {
+ supported = p.GrantTypesSupported
+ }
+
+ for _, t := range supported {
+ if t == grantType {
+ return true
+ }
+ }
+ return false
+}
+
+type ProviderConfigGetter interface {
+ Get() (ProviderConfig, error)
+}
+
+type ProviderConfigSetter interface {
+ Set(ProviderConfig) error
+}
+
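+// ProviderConfigSyncer periodically copies the provider config from a getter
+// to a setter, scheduling the next sync based on the config's expiration and
+// backing off exponentially on errors.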
+type ProviderConfigSyncer struct {
+ from ProviderConfigGetter
+ to ProviderConfigSetter
+ clock clockwork.Clock
+
+ initialSyncDone bool
+ initialSyncWait sync.WaitGroup
+}
+
+func NewProviderConfigSyncer(from ProviderConfigGetter, to ProviderConfigSetter) *ProviderConfigSyncer {
+ return &ProviderConfigSyncer{
+ from: from,
+ to: to,
+ clock: clockwork.NewRealClock(),
+ }
+}
+
+func (s *ProviderConfigSyncer) Run() chan struct{} {
+ stop := make(chan struct{})
+
+ var next pcsStepper
+ next = &pcsStepNext{aft: time.Duration(0)}
+
+ s.initialSyncWait.Add(1)
+ go func() {
+ for {
+ select {
+ case <-s.clock.After(next.after()):
+ next = next.step(s.sync)
+ case <-stop:
+ return
+ }
+ }
+ }()
+
+ return stop
+}
+
+func (s *ProviderConfigSyncer) WaitUntilInitialSync() {
+ s.initialSyncWait.Wait()
+}
+
+func (s *ProviderConfigSyncer) sync() (time.Duration, error) {
+ cfg, err := s.from.Get()
+ if err != nil {
+ return 0, err
+ }
+
+ if err = s.to.Set(cfg); err != nil {
+ return 0, fmt.Errorf("error setting provider config: %v", err)
+ }
+
+ if !s.initialSyncDone {
+ s.initialSyncWait.Done()
+ s.initialSyncDone = true
+ }
+
+ log.Infof("Updating provider config: config=%#v", cfg)
+
+ return nextSyncAfter(cfg.ExpiresAt, s.clock), nil
+}
+
+type pcsStepFunc func() (time.Duration, error)
+
+type pcsStepper interface {
+ after() time.Duration
+ step(pcsStepFunc) pcsStepper
+}
+
+type pcsStepNext struct {
+ aft time.Duration
+}
+
+func (n *pcsStepNext) after() time.Duration {
+ return n.aft
+}
+
+func (n *pcsStepNext) step(fn pcsStepFunc) (next pcsStepper) {
+ ttl, err := fn()
+ if err == nil {
+ next = &pcsStepNext{aft: ttl}
+ log.Debugf("Synced provider config, next attempt in %v", next.after())
+ } else {
+ next = &pcsStepRetry{aft: time.Second}
+ log.Errorf("Provider config sync failed, retrying in %v: %v", next.after(), err)
+ }
+ return
+}
+
+type pcsStepRetry struct {
+ aft time.Duration
+}
+
+func (r *pcsStepRetry) after() time.Duration {
+ return r.aft
+}
+
+func (r *pcsStepRetry) step(fn pcsStepFunc) (next pcsStepper) {
+ ttl, err := fn()
+ if err == nil {
+ next = &pcsStepNext{aft: ttl}
+ log.Infof("Provider config sync no longer failing")
+ } else {
+ next = &pcsStepRetry{aft: timeutil.ExpBackoff(r.aft, time.Minute)}
+ log.Errorf("Provider config sync still failing, retrying in %v: %v", next.after(), err)
+ }
+ return
+}
+
+func nextSyncAfter(exp time.Time, clock clockwork.Clock) time.Duration {
+ if exp.IsZero() {
+ return MaximumProviderConfigSyncInterval
+ }
+
+ t := exp.Sub(clock.Now()) / 2
+ if t > MaximumProviderConfigSyncInterval {
+ t = MaximumProviderConfigSyncInterval
+ } else if t < minimumProviderConfigSyncInterval {
+ t = minimumProviderConfigSyncInterval
+ }
+
+ return t
+}
+
+type httpProviderConfigGetter struct {
+ hc phttp.Client
+ issuerURL string
+ clock clockwork.Clock
+}
+
+func NewHTTPProviderConfigGetter(hc phttp.Client, issuerURL string) *httpProviderConfigGetter {
+ return &httpProviderConfigGetter{
+ hc: hc,
+ issuerURL: issuerURL,
+ clock: clockwork.NewRealClock(),
+ }
+}
+
+func (r *httpProviderConfigGetter) Get() (cfg ProviderConfig, err error) {
+ // If the Issuer value contains a path component, any terminating / MUST be removed before
+ // appending /.well-known/openid-configuration.
+ // https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest
+ discoveryURL := strings.TrimSuffix(r.issuerURL, "/") + discoveryConfigPath
+ req, err := http.NewRequest("GET", discoveryURL, nil)
+ if err != nil {
+ return
+ }
+
+ resp, err := r.hc.Do(req)
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+
+ if err = json.NewDecoder(resp.Body).Decode(&cfg); err != nil {
+ return
+ }
+
+ var ttl time.Duration
+ var ok bool
+ ttl, ok, err = phttp.Cacheable(resp.Header)
+ if err != nil {
+ return
+ } else if ok {
+ cfg.ExpiresAt = r.clock.Now().UTC().Add(ttl)
+ }
+
+ // The issuer value returned MUST be identical to the Issuer URL that was directly used to retrieve the configuration information.
+ // http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation
+ if !urlEqual(cfg.Issuer.String(), r.issuerURL) {
+ err = fmt.Errorf(`"issuer" in config (%v) does not match provided issuer URL (%v)`, cfg.Issuer, r.issuerURL)
+ return
+ }
+
+ return
+}
+
+func FetchProviderConfig(hc phttp.Client, issuerURL string) (ProviderConfig, error) {
+ if hc == nil {
+ hc = http.DefaultClient
+ }
+
+ g := NewHTTPProviderConfigGetter(hc, issuerURL)
+ return g.Get()
+}
+
+func WaitForProviderConfig(hc phttp.Client, issuerURL string) (pcfg ProviderConfig) {
+ return waitForProviderConfig(hc, issuerURL, clockwork.NewRealClock())
+}
+
+func waitForProviderConfig(hc phttp.Client, issuerURL string, clock clockwork.Clock) (pcfg ProviderConfig) {
+ var sleep time.Duration
+ var err error
+ for {
+ pcfg, err = FetchProviderConfig(hc, issuerURL)
+ if err == nil {
+ break
+ }
+
+ sleep = timeutil.ExpBackoff(sleep, time.Minute)
+ fmt.Printf("Failed fetching provider config, trying again in %v: %v\n", sleep, err)
+ time.Sleep(sleep)
+ }
+
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/transport.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/transport.go
new file mode 100644
index 0000000..61c926d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/transport.go
@@ -0,0 +1,88 @@
+package oidc
+
+import (
+ "fmt"
+ "net/http"
+ "sync"
+
+ phttp "github.com/coreos/go-oidc/http"
+ "github.com/coreos/go-oidc/jose"
+)
+
+type TokenRefresher interface {
+ // Verify checks if the provided token is currently valid or not.
+ Verify(jose.JWT) error
+
+ // Refresh attempts to authenticate and retrieve a new token.
+ Refresh() (jose.JWT, error)
+}
+
+type ClientCredsTokenRefresher struct {
+ Issuer string
+ OIDCClient *Client
+}
+
+func (c *ClientCredsTokenRefresher) Verify(jwt jose.JWT) (err error) {
+ _, err = VerifyClientClaims(jwt, c.Issuer)
+ return
+}
+
+func (c *ClientCredsTokenRefresher) Refresh() (jwt jose.JWT, err error) {
+ if err = c.OIDCClient.Healthy(); err != nil {
+ err = fmt.Errorf("unable to authenticate, unhealthy OIDC client: %v", err)
+ return
+ }
+
+ jwt, err = c.OIDCClient.ClientCredsToken([]string{"openid"})
+ if err != nil {
+ err = fmt.Errorf("unable to verify auth code with issuer: %v", err)
+ return
+ }
+
+ return
+}
+
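+// AuthenticatedTransport is an http.RoundTripper that attaches a verified
+// bearer JWT to each outgoing request, refreshing the token via its
+// TokenRefresher when the current one is no longer valid.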
+type AuthenticatedTransport struct {
+ TokenRefresher
+ http.RoundTripper
+
+ mu sync.Mutex
+ jwt jose.JWT
+}
+
+func (t *AuthenticatedTransport) verifiedJWT() (jose.JWT, error) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ if t.TokenRefresher.Verify(t.jwt) == nil {
+ return t.jwt, nil
+ }
+
+ jwt, err := t.TokenRefresher.Refresh()
+ if err != nil {
+ return jose.JWT{}, fmt.Errorf("unable to acquire valid JWT: %v", err)
+ }
+
+ t.jwt = jwt
+ return t.jwt, nil
+}
+
+// SetJWT sets the JWT held by the Transport.
+// This is useful for cases in which you want to set an initial JWT.
+func (t *AuthenticatedTransport) SetJWT(jwt jose.JWT) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ t.jwt = jwt
+}
+
+func (t *AuthenticatedTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+ jwt, err := t.verifiedJWT()
+ if err != nil {
+ return nil, err
+ }
+
+ req := phttp.CopyRequest(r)
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", jwt.Encode()))
+ return t.RoundTripper.RoundTrip(req)
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/util.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/util.go
new file mode 100644
index 0000000..f2a5a19
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/util.go
@@ -0,0 +1,109 @@
+package oidc
+
+import (
+ "crypto/rand"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/coreos/go-oidc/jose"
+)
+
+// RequestTokenExtractor funcs extract a raw encoded token from a request.
+type RequestTokenExtractor func(r *http.Request) (string, error)
+
+// ExtractBearerToken is a RequestTokenExtractor which extracts a bearer token from a request's
+// Authorization header.
+func ExtractBearerToken(r *http.Request) (string, error) {
+ ah := r.Header.Get("Authorization")
+ if ah == "" {
+ return "", errors.New("missing Authorization header")
+ }
+
+ if len(ah) <= 6 || strings.ToUpper(ah[0:6]) != "BEARER" {
+ return "", errors.New("should be a bearer token")
+ }
+
+ val := ah[7:]
+ if len(val) == 0 {
+ return "", errors.New("bearer token is empty")
+ }
+
+ return val, nil
+}
+
+// CookieTokenExtractor returns a RequestTokenExtractor which extracts a token from the named cookie in a request.
+func CookieTokenExtractor(cookieName string) RequestTokenExtractor {
+ return func(r *http.Request) (string, error) {
+ ck, err := r.Cookie(cookieName)
+ if err != nil {
+ return "", fmt.Errorf("token cookie not found in request: %v", err)
+ }
+
+ if ck.Value == "" {
+ return "", errors.New("token cookie found but is empty")
+ }
+
+ return ck.Value, nil
+ }
+}
+
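+// NewClaims returns a claim set populated with the required OIDC ID token
+// claims: iss, sub, aud, iat, and exp.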
+func NewClaims(iss, sub string, aud interface{}, iat, exp time.Time) jose.Claims {
+ return jose.Claims{
+ // required
+ "iss": iss,
+ "sub": sub,
+ "aud": aud,
+ "iat": iat.Unix(),
+ "exp": exp.Unix(),
+ }
+}
+
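+// GenClientID generates a random client identifier of the form <random>@<host>,
+// where host is taken from the given host or host:port string.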
+func GenClientID(hostport string) (string, error) {
+ b, err := randBytes(32)
+ if err != nil {
+ return "", err
+ }
+
+ var host string
+ if strings.Contains(hostport, ":") {
+ host, _, err = net.SplitHostPort(hostport)
+ if err != nil {
+ return "", err
+ }
+ } else {
+ host = hostport
+ }
+
+ return fmt.Sprintf("%s@%s", base64.URLEncoding.EncodeToString(b), host), nil
+}
+
+func randBytes(n int) ([]byte, error) {
+ b := make([]byte, n)
+ got, err := rand.Read(b)
+ if err != nil {
+ return nil, err
+ } else if n != got {
+ return nil, errors.New("unable to generate enough random data")
+ }
+ return b, nil
+}
+
+// urlEqual checks two urls for equality using only the host and path portions.
+func urlEqual(url1, url2 string) bool {
+ u1, err := url.Parse(url1)
+ if err != nil {
+ return false
+ }
+ u2, err := url.Parse(url2)
+ if err != nil {
+ return false
+ }
+
+ return strings.ToLower(u1.Host+u1.Path) == strings.ToLower(u2.Host+u2.Path)
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/verification.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/verification.go
new file mode 100644
index 0000000..0024130
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/verification.go
@@ -0,0 +1,188 @@
+package oidc
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+
+ "github.com/coreos/go-oidc/jose"
+ "github.com/coreos/go-oidc/key"
+)
+
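+// VerifySignature reports whether the JWT's signature can be verified by any
+// of the given public keys.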
+func VerifySignature(jwt jose.JWT, keys []key.PublicKey) (bool, error) {
+ jwtBytes := []byte(jwt.Data())
+ for _, k := range keys {
+ v, err := k.Verifier()
+ if err != nil {
+ return false, err
+ }
+ if v.Verify(jwt.Signature, jwtBytes) == nil {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// containsString returns true if the given string (needle) is found
+// in the string slice (haystack).
+func containsString(needle string, haystack []string) bool {
+ for _, v := range haystack {
+ if v == needle {
+ return true
+ }
+ }
+ return false
+}
+
+// VerifyClaims verifies claims in accordance with the OIDC spec:
+// http://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation
+func VerifyClaims(jwt jose.JWT, issuer, clientID string) error {
+ now := time.Now().UTC()
+
+ claims, err := jwt.Claims()
+ if err != nil {
+ return err
+ }
+
+ ident, err := IdentityFromClaims(claims)
+ if err != nil {
+ return err
+ }
+
+ if ident.ExpiresAt.Before(now) {
+ return errors.New("token is expired")
+ }
+
+ // iss REQUIRED. Issuer Identifier for the Issuer of the response.
+ // The iss value is a case sensitive URL using the https scheme that contains scheme,
+ // host, and optionally, port number and path components and no query or fragment components.
+ if iss, exists := claims["iss"].(string); exists {
+ if !urlEqual(iss, issuer) {
+ return fmt.Errorf("invalid claim value: 'iss'. expected=%s, found=%s.", issuer, iss)
+ }
+ } else {
+ return errors.New("missing claim: 'iss'")
+ }
+
+ // iat REQUIRED. Time at which the JWT was issued.
+ // Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z
+ // as measured in UTC until the date/time.
+ if _, exists := claims["iat"].(float64); !exists {
+ return errors.New("missing claim: 'iat'")
+ }
+
+ // aud REQUIRED. Audience(s) that this ID Token is intended for.
+ // It MUST contain the OAuth 2.0 client_id of the Relying Party as an audience value.
+ // It MAY also contain identifiers for other audiences. In the general case, the aud
+ // value is an array of case sensitive strings. In the common special case when there
+ // is one audience, the aud value MAY be a single case sensitive string.
+ if aud, ok, err := claims.StringClaim("aud"); err == nil && ok {
+ if aud != clientID {
+ return fmt.Errorf("invalid claims, 'aud' claim and 'client_id' do not match, aud=%s, client_id=%s", aud, clientID)
+ }
+ } else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok {
+ if !containsString(clientID, aud) {
+ return fmt.Errorf("invalid claims, cannot find 'client_id' in 'aud' claim, aud=%v, client_id=%s", aud, clientID)
+ }
+ } else {
+ return errors.New("invalid claim value: 'aud' is required, and should be either string or string array")
+ }
+
+ return nil
+}
+
+// VerifyClientClaims verifies all the required claims are valid for a "client credentials" JWT.
+// Returns the client ID if valid, or an error if invalid.
+func VerifyClientClaims(jwt jose.JWT, issuer string) (string, error) {
+ claims, err := jwt.Claims()
+ if err != nil {
+ return "", fmt.Errorf("failed to parse JWT claims: %v", err)
+ }
+
+ iss, ok, err := claims.StringClaim("iss")
+ if err != nil {
+ return "", fmt.Errorf("failed to parse 'iss' claim: %v", err)
+ } else if !ok {
+ return "", errors.New("missing required 'iss' claim")
+ } else if !urlEqual(iss, issuer) {
+ return "", fmt.Errorf("'iss' claim does not match expected issuer, iss=%s", iss)
+ }
+
+ sub, ok, err := claims.StringClaim("sub")
+ if err != nil {
+ return "", fmt.Errorf("failed to parse 'sub' claim: %v", err)
+ } else if !ok {
+ return "", errors.New("missing required 'sub' claim")
+ }
+
+ if aud, ok, err := claims.StringClaim("aud"); err == nil && ok {
+ if aud != sub {
+ return "", fmt.Errorf("invalid claims, 'aud' claim and 'sub' claim do not match, aud=%s, sub=%s", aud, sub)
+ }
+ } else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok {
+ if !containsString(sub, aud) {
+ return "", fmt.Errorf("invalid claims, cannot find 'sud' in 'aud' claim, aud=%v, sub=%s", aud, sub)
+ }
+ } else {
+ return "", errors.New("invalid claim value: 'aud' is required, and should be either string or string array")
+ }
+
+ now := time.Now().UTC()
+ exp, ok, err := claims.TimeClaim("exp")
+ if err != nil {
+ return "", fmt.Errorf("failed to parse 'exp' claim: %v", err)
+ } else if !ok {
+ return "", errors.New("missing required 'exp' claim")
+ } else if exp.Before(now) {
+ return "", fmt.Errorf("token already expired at: %v", exp)
+ }
+
+ return sub, nil
+}
+
+type JWTVerifier struct {
+ issuer string
+ clientID string
+ syncFunc func() error
+ keysFunc func() []key.PublicKey
+ clock clockwork.Clock
+}
+
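+// NewJWTVerifier constructs a JWTVerifier for tokens issued by issuer to
+// clientID, using keysFunc to supply verification keys and syncFunc to refresh
+// them when verification fails.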
+func NewJWTVerifier(issuer, clientID string, syncFunc func() error, keysFunc func() []key.PublicKey) JWTVerifier {
+ return JWTVerifier{
+ issuer: issuer,
+ clientID: clientID,
+ syncFunc: syncFunc,
+ keysFunc: keysFunc,
+ clock: clockwork.NewRealClock(),
+ }
+}
+
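+// Verify checks the JWT's signature against the available keys, syncing the
+// key set and retrying once if the first attempt finds no matching key, then
+// validates the token's claims.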
+func (v *JWTVerifier) Verify(jwt jose.JWT) error {
+ ok, err := VerifySignature(jwt, v.keysFunc())
+ if ok {
+ goto SignatureVerified
+ } else if err != nil {
+ return fmt.Errorf("oidc: JWT signature verification failed: %v", err)
+ }
+
+ if err = v.syncFunc(); err != nil {
+ return fmt.Errorf("oidc: failed syncing KeySet: %v", err)
+ }
+
+ ok, err = VerifySignature(jwt, v.keysFunc())
+ if err != nil {
+ return fmt.Errorf("oidc: JWT signature verification failed: %v", err)
+ } else if !ok {
+ return errors.New("oidc: unable to verify JWT signature: no matching keys")
+ }
+
+SignatureVerified:
+ if err := VerifyClaims(jwt, v.issuer, v.clientID); err != nil {
+ return fmt.Errorf("oidc: JWT claims invalid: %v", err)
+ }
+
+ return nil
+}
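+
+// Illustrative usage (hypothetical relying party; the issuer URL, client ID,
+// syncFunc and keysFunc are placeholders supplied by the caller):
+//
+//	v := NewJWTVerifier("https://issuer.example.com", "my-client-id", syncFunc, keysFunc)
+//	if err := v.Verify(jwt); err != nil {
+//		// reject the token
+//	}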
diff --git a/src/kube2msb/vendor/github.com/coreos/go-systemd/LICENSE b/src/kube2msb/vendor/github.com/coreos/go-systemd/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-systemd/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/coreos/go-systemd/journal/journal.go b/src/kube2msb/vendor/github.com/coreos/go-systemd/journal/journal.go
new file mode 100644
index 0000000..7f43499
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-systemd/journal/journal.go
@@ -0,0 +1,179 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps the
+// sd-journal C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+// Priority of a journal message
+type Priority int
+
+const (
+ PriEmerg Priority = iota
+ PriAlert
+ PriCrit
+ PriErr
+ PriWarning
+ PriNotice
+ PriInfo
+ PriDebug
+)
+
+var conn net.Conn
+
+func init() {
+ var err error
+ conn, err = net.Dial("unixgram", "/run/systemd/journal/socket")
+ if err != nil {
+ conn = nil
+ }
+}
+
+// Enabled returns true if the local systemd journal is available for logging
+func Enabled() bool {
+ return conn != nil
+}
+
+// Send a message to the local systemd journal. vars is a map of journald
+// fields to values. Fields must be composed of uppercase letters, numbers,
+// and underscores, but must not start with an underscore. Within these
+// restrictions, any arbitrary field name may be used. Some names have special
+// significance: see the journalctl documentation
+// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
+// for more details. vars may be nil.
+func Send(message string, priority Priority, vars map[string]string) error {
+ if conn == nil {
+ return journalError("could not connect to journald socket")
+ }
+
+ data := new(bytes.Buffer)
+ appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
+ appendVariable(data, "MESSAGE", message)
+ for k, v := range vars {
+ appendVariable(data, k, v)
+ }
+
+ _, err := io.Copy(conn, data)
+ if err != nil && isSocketSpaceError(err) {
+ file, err := tempFd()
+ if err != nil {
+ return journalError(err.Error())
+ }
+ defer file.Close()
+ _, err = io.Copy(file, data)
+ if err != nil {
+ return journalError(err.Error())
+ }
+
+ rights := syscall.UnixRights(int(file.Fd()))
+
+ /* this connection should always be a UnixConn, but better safe than sorry */
+ unixConn, ok := conn.(*net.UnixConn)
+ if !ok {
+ return journalError("can't send file through non-Unix connection")
+ }
+ unixConn.WriteMsgUnix([]byte{}, rights, nil)
+ } else if err != nil {
+ return journalError(err.Error())
+ }
+ return nil
+}
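+
+// Illustrative usage (hypothetical caller; DEVICE is an arbitrary example of a
+// user-defined journald field):
+//
+//	if journal.Enabled() {
+//		journal.Send("disk nearly full", journal.PriWarning, map[string]string{
+//			"DEVICE": "/dev/sda1",
+//		})
+//	}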
+
+// Print prints a message to the local systemd journal using Send().
+func Print(priority Priority, format string, a ...interface{}) error {
+ return Send(fmt.Sprintf(format, a...), priority, nil)
+}
+
+func appendVariable(w io.Writer, name, value string) {
+ if !validVarName(name) {
+ journalError("variable name contains invalid character, ignoring")
+ }
+ if strings.ContainsRune(value, '\n') {
+ /* When the value contains a newline, we write:
+ * - the variable name, followed by a newline
+ * - the size (in 64bit little endian format)
+ * - the data, followed by a newline
+ */
+ fmt.Fprintln(w, name)
+ binary.Write(w, binary.LittleEndian, uint64(len(value)))
+ fmt.Fprintln(w, value)
+ } else {
+ /* just write the variable and value all on one line */
+ fmt.Fprintf(w, "%s=%s\n", name, value)
+ }
+}
+
+func validVarName(name string) bool {
+ /* The variable name must be in uppercase and consist only of characters,
+ * numbers and underscores, and may not begin with an underscore. (from the docs)
+ */
+
+ valid := name[0] != '_'
+ for _, c := range name {
+ valid = valid && ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_'
+ }
+ return valid
+}
+
+func isSocketSpaceError(err error) bool {
+ opErr, ok := err.(*net.OpError)
+ if !ok {
+ return false
+ }
+
+ sysErr, ok := opErr.Err.(syscall.Errno)
+ if !ok {
+ return false
+ }
+
+ return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
+}
+
+func tempFd() (*os.File, error) {
+ file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
+ if err != nil {
+ return nil, err
+ }
+ err = syscall.Unlink(file.Name())
+ if err != nil {
+ return nil, err
+ }
+ return file, nil
+}
+
+func journalError(s string) error {
+ s = "journal error: " + s
+ fmt.Fprintln(os.Stderr, s)
+ return errors.New(s)
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/LICENSE b/src/kube2msb/vendor/github.com/coreos/pkg/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/NOTICE b/src/kube2msb/vendor/github.com/coreos/pkg/NOTICE
new file mode 100644
index 0000000..b39ddfa
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/README.md b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/README.md
new file mode 100644
index 0000000..81efb1f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/README.md
@@ -0,0 +1,39 @@
+# capnslog, the CoreOS logging package
+
+There are far too many logging packages out there, with varying licenses, far too many features (colorization, all sorts of log frameworks), or APIs that are just a pain to use (lack of `Fatalln()`?).
+capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
+
+### Design Principles
+
+##### `package main` is the place where logging gets turned on and routed
+
+A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
+
+##### All log options are runtime-configurable.
+
+It is still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly.
+
+##### There is one log object per package. It is registered under its repository and package name.
+
+`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
+
+##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
+
+Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies `io.Writer`.
+
+Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
+
+##### Log objects are an interface
+
+An object knows best how to print itself. Log objects can collect more interesting metadata if they wish; however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
+
+##### Log levels have specific meanings:
+
+ * Critical: Unrecoverable. Must fail.
+ * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost
+ * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
+ * Notice: Normal, but important (uncommon) log information.
+ * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations.
+ * Debug: Everything is still fine, but even common operations may be logged, along with less helpful but higher-volume notices.
+ * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
+
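+### Example
+
+A minimal usage sketch, assuming an importing repository named
+`github.com/example/project` (the repo and package names are placeholders):
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+// One log object per package, registered under the repo and package name.
+var plog = capnslog.NewPackageLogger("github.com/example/project", "main")
+
+func main() {
+	// main is where logging gets turned on and routed.
+	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, false))
+	capnslog.SetGlobalLogLevel(capnslog.INFO)
+
+	plog.Infof("listening on %s", ":8080")
+}
+```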
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/formatters.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/formatters.go
new file mode 100644
index 0000000..b305a84
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/formatters.go
@@ -0,0 +1,157 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "log"
+ "runtime"
+ "strings"
+ "time"
+)
+
+type Formatter interface {
+ Format(pkg string, level LogLevel, depth int, entries ...interface{})
+ Flush()
+}
+
+func NewStringFormatter(w io.Writer) Formatter {
+ return &StringFormatter{
+ w: bufio.NewWriter(w),
+ }
+}
+
+type StringFormatter struct {
+ w *bufio.Writer
+}
+
+func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
+ now := time.Now().UTC()
+ s.w.WriteString(now.Format(time.RFC3339))
+ s.w.WriteByte(' ')
+ writeEntries(s.w, pkg, l, i, entries...)
+ s.Flush()
+}
+
+func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
+ if pkg != "" {
+ w.WriteString(pkg + ": ")
+ }
+ str := fmt.Sprint(entries...)
+ endsInNL := strings.HasSuffix(str, "\n")
+ w.WriteString(str)
+ if !endsInNL {
+ w.WriteString("\n")
+ }
+}
+
+func (s *StringFormatter) Flush() {
+ s.w.Flush()
+}
+
+func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
+ return &PrettyFormatter{
+ w: bufio.NewWriter(w),
+ debug: debug,
+ }
+}
+
+type PrettyFormatter struct {
+ w *bufio.Writer
+ debug bool
+}
+
+func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
+ now := time.Now()
+ ts := now.Format("2006-01-02 15:04:05")
+ c.w.WriteString(ts)
+ ms := now.Nanosecond() / 1000
+ c.w.WriteString(fmt.Sprintf(".%06d", ms))
+ if c.debug {
+ _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ if slash >= 0 {
+ file = file[slash+1:]
+ }
+ }
+ if line < 0 {
+ line = 0 // not a real line number
+ }
+ c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
+ }
+ c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
+ writeEntries(c.w, pkg, l, depth, entries...)
+ c.Flush()
+}
+
+func (c *PrettyFormatter) Flush() {
+ c.w.Flush()
+}
+
+// LogFormatter emulates the form of the traditional built-in logger.
+type LogFormatter struct {
+ logger *log.Logger
+ prefix string
+}
+
+// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the
+// golang log package to actually do the logging work so that logs look similar.
+func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter {
+ return &LogFormatter{
+ logger: log.New(w, "", flag), // don't use prefix here
+ prefix: prefix, // save it instead
+ }
+}
+
+// Format builds a log message for the LogFormatter. The LogLevel is ignored.
+func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) {
+ str := fmt.Sprint(entries...)
+ prefix := lf.prefix
+ if pkg != "" {
+ prefix = fmt.Sprintf("%s%s: ", prefix, pkg)
+ }
+ lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5
+}
+
+// Flush is included so that the interface is complete, but is a no-op.
+func (lf *LogFormatter) Flush() {
+ // noop
+}
+
+// NilFormatter is a no-op log formatter that does nothing.
+type NilFormatter struct {
+}
+
+// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no
+// messages so that you can cause part of your logging to be silent.
+func NewNilFormatter() Formatter {
+ return &NilFormatter{}
+}
+
+// Format does nothing.
+func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) {
+ // noop
+}
+
+// Flush is included so that the interface is complete, but is a no-op.
+func (_ *NilFormatter) Flush() {
+ // noop
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
new file mode 100644
index 0000000..426603e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
@@ -0,0 +1,96 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var pid = os.Getpid()
+
+type GlogFormatter struct {
+ StringFormatter
+}
+
+func NewGlogFormatter(w io.Writer) *GlogFormatter {
+ g := &GlogFormatter{}
+ g.w = bufio.NewWriter(w)
+ return g
+}
+
+func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
+ g.w.Write(GlogHeader(level, depth+1))
+ g.StringFormatter.Format(pkg, level, depth+1, entries...)
+}
+
+func GlogHeader(level LogLevel, depth int) []byte {
+ // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+ now := time.Now().UTC()
+ _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ if slash >= 0 {
+ file = file[slash+1:]
+ }
+ }
+ if line < 0 {
+ line = 0 // not a real line number
+ }
+ buf := &bytes.Buffer{}
+ buf.Grow(30)
+ _, month, day := now.Date()
+ hour, minute, second := now.Clock()
+ buf.WriteString(level.Char())
+ twoDigits(buf, int(month))
+ twoDigits(buf, day)
+ buf.WriteByte(' ')
+ twoDigits(buf, hour)
+ buf.WriteByte(':')
+ twoDigits(buf, minute)
+ buf.WriteByte(':')
+ twoDigits(buf, second)
+ buf.WriteByte('.')
+ buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
+ buf.WriteByte('Z')
+ buf.WriteByte(' ')
+ buf.WriteString(strconv.Itoa(pid))
+ buf.WriteByte(' ')
+ buf.WriteString(file)
+ buf.WriteByte(':')
+ buf.WriteString(strconv.Itoa(line))
+ buf.WriteByte(']')
+ buf.WriteByte(' ')
+ return buf.Bytes()
+}
+
+const digits = "0123456789"
+
+func twoDigits(b *bytes.Buffer, d int) {
+ c2 := digits[d%10]
+ d /= 10
+ c1 := digits[d%10]
+ b.WriteByte(c1)
+ b.WriteByte(c2)
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init.go
new file mode 100644
index 0000000..44b8cd3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init.go
@@ -0,0 +1,49 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+ "io"
+ "os"
+ "syscall"
+)
+
+// Here's where the opinionation comes in. We need some sensible defaults,
+// especially after taking over the log package. Your project (whatever it may
+// be) may see things differently. That's okay; there should be no defaults in
+// the main package that cannot be controlled or overridden programmatically,
+// otherwise it's a bug. The way to override them is to create your own
+// init_log.go file, much like this one.
+
+func init() {
+ initHijack()
+
+ // Go `log` package uses os.Stderr.
+ SetFormatter(NewDefaultFormatter(os.Stderr))
+ SetGlobalLogLevel(INFO)
+}
+
+func NewDefaultFormatter(out io.Writer) Formatter {
+ if syscall.Getppid() == 1 {
+ // We're running under init, which may be systemd.
+ f, err := NewJournaldFormatter()
+ if err == nil {
+ return f
+ }
+ }
+ return NewPrettyFormatter(out, false)
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init_windows.go
new file mode 100644
index 0000000..4553050
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init_windows.go
@@ -0,0 +1,25 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import "os"
+
+func init() {
+ initHijack()
+
+ // Go `log` package uses os.Stderr.
+ SetFormatter(NewPrettyFormatter(os.Stderr, false))
+ SetGlobalLogLevel(INFO)
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
new file mode 100644
index 0000000..72e0520
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
@@ -0,0 +1,68 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/coreos/go-systemd/journal"
+)
+
+func NewJournaldFormatter() (Formatter, error) {
+ if !journal.Enabled() {
+ return nil, errors.New("No systemd detected")
+ }
+ return &journaldFormatter{}, nil
+}
+
+type journaldFormatter struct{}
+
+func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
+ var pri journal.Priority
+ switch l {
+ case CRITICAL:
+ pri = journal.PriCrit
+ case ERROR:
+ pri = journal.PriErr
+ case WARNING:
+ pri = journal.PriWarning
+ case NOTICE:
+ pri = journal.PriNotice
+ case INFO:
+ pri = journal.PriInfo
+ case DEBUG:
+ pri = journal.PriDebug
+ case TRACE:
+ pri = journal.PriDebug
+ default:
+ panic("Unhandled loglevel")
+ }
+ msg := fmt.Sprint(entries...)
+ tags := map[string]string{
+ "PACKAGE": pkg,
+ "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
+ }
+ err := journal.Send(msg, pri, tags)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ }
+}
+
+func (j *journaldFormatter) Flush() {}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
new file mode 100644
index 0000000..970086b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
@@ -0,0 +1,39 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "log"
+)
+
+func initHijack() {
+ pkg := NewPackageLogger("log", "")
+ w := packageWriter{pkg}
+ log.SetFlags(0)
+ log.SetPrefix("")
+ log.SetOutput(w)
+}
+
+type packageWriter struct {
+ pl *PackageLogger
+}
+
+func (p packageWriter) Write(b []byte) (int, error) {
+ if p.pl.level < INFO {
+ return 0, nil
+ }
+ p.pl.internalLog(calldepth+2, INFO, string(b))
+ return len(b), nil
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/logmap.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/logmap.go
new file mode 100644
index 0000000..8495448
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/logmap.go
@@ -0,0 +1,240 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "errors"
+ "strings"
+ "sync"
+)
+
+// LogLevel is the set of all log levels.
+type LogLevel int8
+
+const (
+ // CRITICAL is the lowest log level; only errors which will end the program will be propagated.
+ CRITICAL LogLevel = iota - 1
+ // ERROR is for errors that are not fatal but lead to troubling behavior.
+ ERROR
+ // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations.
+ WARNING
+ // NOTICE is for normal but significant conditions.
+ NOTICE
+ // INFO is a log level for common, everyday log updates.
+ INFO
+ // DEBUG is the default hidden level for more verbose updates about internal processes.
+ DEBUG
+ // TRACE is for (potentially) call by call tracing of programs.
+ TRACE
+)
+
+// Char returns a single-character representation of the log level.
+func (l LogLevel) Char() string {
+ switch l {
+ case CRITICAL:
+ return "C"
+ case ERROR:
+ return "E"
+ case WARNING:
+ return "W"
+ case NOTICE:
+ return "N"
+ case INFO:
+ return "I"
+ case DEBUG:
+ return "D"
+ case TRACE:
+ return "T"
+ default:
+ panic("Unhandled loglevel")
+ }
+}
+
+// String returns a multi-character representation of the log level.
+func (l LogLevel) String() string {
+ switch l {
+ case CRITICAL:
+ return "CRITICAL"
+ case ERROR:
+ return "ERROR"
+ case WARNING:
+ return "WARNING"
+ case NOTICE:
+ return "NOTICE"
+ case INFO:
+ return "INFO"
+ case DEBUG:
+ return "DEBUG"
+ case TRACE:
+ return "TRACE"
+ default:
+ panic("Unhandled loglevel")
+ }
+}
+
+// Update using the given string value. Fulfills the flag.Value interface.
+func (l *LogLevel) Set(s string) error {
+ value, err := ParseLevel(s)
+ if err != nil {
+ return err
+ }
+
+ *l = value
+ return nil
+}
+
+// ParseLevel translates some potential loglevel strings into their corresponding levels.
+func ParseLevel(s string) (LogLevel, error) {
+ switch s {
+ case "CRITICAL", "C":
+ return CRITICAL, nil
+ case "ERROR", "0", "E":
+ return ERROR, nil
+ case "WARNING", "1", "W":
+ return WARNING, nil
+ case "NOTICE", "2", "N":
+ return NOTICE, nil
+ case "INFO", "3", "I":
+ return INFO, nil
+ case "DEBUG", "4", "D":
+ return DEBUG, nil
+ case "TRACE", "5", "T":
+ return TRACE, nil
+ }
+ return CRITICAL, errors.New("couldn't parse log level " + s)
+}
+
+type RepoLogger map[string]*PackageLogger
+
+type loggerStruct struct {
+ sync.Mutex
+ repoMap map[string]RepoLogger
+ formatter Formatter
+}
+
+// logger is the global logger
+var logger = new(loggerStruct)
+
+// SetGlobalLogLevel sets the log level for all packages in all repositories
+// registered with capnslog.
+func SetGlobalLogLevel(l LogLevel) {
+ logger.Lock()
+ defer logger.Unlock()
+ for _, r := range logger.repoMap {
+ r.setRepoLogLevelInternal(l)
+ }
+}
+
+// GetRepoLogger may return the handle to the repository's set of packages' loggers.
+func GetRepoLogger(repo string) (RepoLogger, error) {
+ logger.Lock()
+ defer logger.Unlock()
+ r, ok := logger.repoMap[repo]
+ if !ok {
+ return nil, errors.New("no packages registered for repo " + repo)
+ }
+ return r, nil
+}
+
+// MustRepoLogger returns the handle to the repository's packages' loggers.
+func MustRepoLogger(repo string) RepoLogger {
+ r, err := GetRepoLogger(repo)
+ if err != nil {
+ panic(err)
+ }
+ return r
+}
+
+// SetRepoLogLevel sets the log level for all packages in the repository.
+func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
+ logger.Lock()
+ defer logger.Unlock()
+ r.setRepoLogLevelInternal(l)
+}
+
+func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
+ for _, v := range r {
+ v.level = l
+ }
+}
+
+// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in
+// order, and returns a map of the results, for use in SetLogLevel.
+func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) {
+ setlist := strings.Split(conf, ",")
+ out := make(map[string]LogLevel)
+ for _, setstring := range setlist {
+ setting := strings.Split(setstring, "=")
+ if len(setting) != 2 {
+ return nil, errors.New("oddly structured `pkg=level` option: " + setstring)
+ }
+ l, err := ParseLevel(setting[1])
+ if err != nil {
+ return nil, err
+ }
+ out[setting[0]] = l
+ }
+ return out, nil
+}
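+
+// Illustrative usage (hypothetical caller; repoLogger is a RepoLogger obtained
+// from MustRepoLogger, and the package names are examples):
+//
+//	levels, err := repoLogger.ParseLogLevelConfig("*=NOTICE,capnslog=DEBUG")
+//	if err == nil {
+//		repoLogger.SetLogLevel(levels)
+//	}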
+
+// SetLogLevel takes a map of package names within a repository to their desired
+// loglevel, and sets the levels appropriately. Unknown packages are ignored.
+// "*" is a special package name that corresponds to all packages, and will be
+// processed first.
+func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
+ logger.Lock()
+ defer logger.Unlock()
+ if l, ok := m["*"]; ok {
+ r.setRepoLogLevelInternal(l)
+ }
+ for k, v := range m {
+ l, ok := r[k]
+ if !ok {
+ continue
+ }
+ l.level = v
+ }
+}
+
+// SetFormatter sets the formatting function for all logs.
+func SetFormatter(f Formatter) {
+ logger.Lock()
+ defer logger.Unlock()
+ logger.formatter = f
+}
+
+// NewPackageLogger creates a package logger object.
+// This should be defined as a global var in your package, referencing your repo.
+func NewPackageLogger(repo string, pkg string) (p *PackageLogger) {
+ logger.Lock()
+ defer logger.Unlock()
+ if logger.repoMap == nil {
+ logger.repoMap = make(map[string]RepoLogger)
+ }
+ r, rok := logger.repoMap[repo]
+ if !rok {
+ logger.repoMap[repo] = make(RepoLogger)
+ r = logger.repoMap[repo]
+ }
+ p, pok := r[pkg]
+ if !pok {
+ r[pkg] = &PackageLogger{
+ pkg: pkg,
+ level: INFO,
+ }
+ p = r[pkg]
+ }
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
new file mode 100644
index 0000000..e2c4668
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
@@ -0,0 +1,171 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "fmt"
+ "os"
+)
+
+type PackageLogger struct {
+ pkg string
+ level LogLevel
+}
+
+const calldepth = 2
+
+func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
+ logger.Lock()
+ defer logger.Unlock()
+ if inLevel != CRITICAL && p.level < inLevel {
+ return
+ }
+ if logger.formatter != nil {
+ logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
+ }
+}
+
+func (p *PackageLogger) LevelAt(l LogLevel) bool {
+ logger.Lock()
+ defer logger.Unlock()
+ return p.level >= l
+}
+
+// Log a formatted string at any level between ERROR and TRACE
+func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
+ p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
+}
+
+// Log a message at any level between ERROR and TRACE
+func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
+ p.internalLog(calldepth, l, fmt.Sprint(args...))
+}
+
+// log stdlib compatibility
+
+func (p *PackageLogger) Println(args ...interface{}) {
+ p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
+}
+
+func (p *PackageLogger) Printf(format string, args ...interface{}) {
+ p.Logf(INFO, format, args...)
+}
+
+func (p *PackageLogger) Print(args ...interface{}) {
+ p.internalLog(calldepth, INFO, fmt.Sprint(args...))
+}
+
+// Panic and fatal
+
+func (p *PackageLogger) Panicf(format string, args ...interface{}) {
+ s := fmt.Sprintf(format, args...)
+ p.internalLog(calldepth, CRITICAL, s)
+ panic(s)
+}
+
+func (p *PackageLogger) Panic(args ...interface{}) {
+ s := fmt.Sprint(args...)
+ p.internalLog(calldepth, CRITICAL, s)
+ panic(s)
+}
+
+func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
+ p.Logf(CRITICAL, format, args...)
+ os.Exit(1)
+}
+
+func (p *PackageLogger) Fatal(args ...interface{}) {
+ s := fmt.Sprint(args...)
+ p.internalLog(calldepth, CRITICAL, s)
+ os.Exit(1)
+}
+
+// Error Functions
+
+func (p *PackageLogger) Errorf(format string, args ...interface{}) {
+ p.Logf(ERROR, format, args...)
+}
+
+func (p *PackageLogger) Error(entries ...interface{}) {
+ p.internalLog(calldepth, ERROR, entries...)
+}
+
+// Warning Functions
+
+func (p *PackageLogger) Warningf(format string, args ...interface{}) {
+ p.Logf(WARNING, format, args...)
+}
+
+func (p *PackageLogger) Warning(entries ...interface{}) {
+ p.internalLog(calldepth, WARNING, entries...)
+}
+
+// Notice Functions
+
+func (p *PackageLogger) Noticef(format string, args ...interface{}) {
+ p.Logf(NOTICE, format, args...)
+}
+
+func (p *PackageLogger) Notice(entries ...interface{}) {
+ p.internalLog(calldepth, NOTICE, entries...)
+}
+
+// Info Functions
+
+func (p *PackageLogger) Infof(format string, args ...interface{}) {
+ p.Logf(INFO, format, args...)
+}
+
+func (p *PackageLogger) Info(entries ...interface{}) {
+ p.internalLog(calldepth, INFO, entries...)
+}
+
+// Debug Functions
+
+func (p *PackageLogger) Debugf(format string, args ...interface{}) {
+ if p.level < DEBUG {
+ return
+ }
+ p.Logf(DEBUG, format, args...)
+}
+
+func (p *PackageLogger) Debug(entries ...interface{}) {
+ if p.level < DEBUG {
+ return
+ }
+ p.internalLog(calldepth, DEBUG, entries...)
+}
+
+// Trace Functions
+
+func (p *PackageLogger) Tracef(format string, args ...interface{}) {
+ if p.level < TRACE {
+ return
+ }
+ p.Logf(TRACE, format, args...)
+}
+
+func (p *PackageLogger) Trace(entries ...interface{}) {
+ if p.level < TRACE {
+ return
+ }
+ p.internalLog(calldepth, TRACE, entries...)
+}
+
+func (p *PackageLogger) Flush() {
+ logger.Lock()
+ defer logger.Unlock()
+ logger.formatter.Flush()
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
new file mode 100644
index 0000000..4be5a1f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
@@ -0,0 +1,65 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+ "fmt"
+ "log/syslog"
+)
+
+func NewSyslogFormatter(w *syslog.Writer) Formatter {
+ return &syslogFormatter{w}
+}
+
+func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
+ w, err := syslog.New(syslog.LOG_DEBUG, tag)
+ if err != nil {
+ return nil, err
+ }
+ return NewSyslogFormatter(w), nil
+}
+
+type syslogFormatter struct {
+ w *syslog.Writer
+}
+
+func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
+ for _, entry := range entries {
+ str := fmt.Sprint(entry)
+ switch l {
+ case CRITICAL:
+ s.w.Crit(str)
+ case ERROR:
+ s.w.Err(str)
+ case WARNING:
+ s.w.Warning(str)
+ case NOTICE:
+ s.w.Notice(str)
+ case INFO:
+ s.w.Info(str)
+ case DEBUG:
+ s.w.Debug(str)
+ case TRACE:
+ s.w.Debug(str)
+ default:
+ panic("Unhandled loglevel")
+ }
+ }
+}
+
+func (s *syslogFormatter) Flush() {
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/health/README.md b/src/kube2msb/vendor/github.com/coreos/pkg/health/README.md
new file mode 100644
index 0000000..5ec34c2
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/health/README.md
@@ -0,0 +1,11 @@
+health
+====
+
+A simple framework for implementing an HTTP health check endpoint on servers.
+
+Users implement their own `health.Checkable` types and create a `health.Checker`; the `Checker` itself implements `http.Handler`, so it can be mounted directly to serve the health endpoint.
+
+### Documentation
+
+For more details, visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/health)
+
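A minimal sketch of that flow, using the vendored package as shown in this change; the `dbCheck` type, its failure condition, and the `/healthz` route are illustrative.

```go
package main

import (
	"errors"
	"net/http"

	"github.com/coreos/pkg/health"
)

// dbCheck is a hypothetical Checkable; replace Healthy with a real probe
// (ping a database, check a queue, ...).
type dbCheck struct{ ok bool }

func (c dbCheck) Healthy() error {
	if !c.ok {
		return errors.New("database unreachable")
	}
	return nil
}

func main() {
	checker := health.Checker{Checks: []health.Checkable{dbCheck{ok: true}}}
	http.Handle("/healthz", checker) // Checker implements http.Handler
	http.ListenAndServe(":8080", nil)
}
```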
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/health/health.go b/src/kube2msb/vendor/github.com/coreos/pkg/health/health.go
new file mode 100644
index 0000000..a1c3610
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/health/health.go
@@ -0,0 +1,127 @@
+package health
+
+import (
+ "expvar"
+ "fmt"
+ "log"
+ "net/http"
+
+ "github.com/coreos/pkg/httputil"
+)
+
+// Checkables should return nil when the thing they are checking is healthy, and an error otherwise.
+type Checkable interface {
+ Healthy() error
+}
+
+// Checker provides a way to make an endpoint which can be probed for system health.
+type Checker struct {
+ // Checks are the Checkables to be checked when probing.
+ Checks []Checkable
+
+ // UnhealthyHandler is called when one or more of the checks are unhealthy.
+ // If not provided, DefaultUnhealthyHandler is called.
+ UnhealthyHandler UnhealthyHandler
+
+ // HealthyHandler is called when all checks are healthy.
+ // If not provided, DefaultHealthyHandler is called.
+ HealthyHandler http.HandlerFunc
+}
+
+func (c Checker) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ unhealthyHandler := c.UnhealthyHandler
+ if unhealthyHandler == nil {
+ unhealthyHandler = DefaultUnhealthyHandler
+ }
+
+ successHandler := c.HealthyHandler
+ if successHandler == nil {
+ successHandler = DefaultHealthyHandler
+ }
+
+ if r.Method != "GET" {
+ w.Header().Set("Allow", "GET")
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ return
+ }
+
+ if err := Check(c.Checks); err != nil {
+ unhealthyHandler(w, r, err)
+ return
+ }
+
+ successHandler(w, r)
+}
+
+type UnhealthyHandler func(w http.ResponseWriter, r *http.Request, err error)
+
+type StatusResponse struct {
+ Status string `json:"status"`
+ Details *StatusResponseDetails `json:"details,omitempty"`
+}
+
+type StatusResponseDetails struct {
+ Code int `json:"code,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+func Check(checks []Checkable) (err error) {
+ errs := []error{}
+ for _, c := range checks {
+ if e := c.Healthy(); e != nil {
+ errs = append(errs, e)
+ }
+ }
+
+ switch len(errs) {
+ case 0:
+ err = nil
+ case 1:
+ err = errs[0]
+ default:
+ err = fmt.Errorf("multiple health check failure: %v", errs)
+ }
+
+ return
+}
+
+func DefaultHealthyHandler(w http.ResponseWriter, r *http.Request) {
+ err := httputil.WriteJSONResponse(w, http.StatusOK, StatusResponse{
+ Status: "ok",
+ })
+ if err != nil {
+ // TODO(bobbyrullo): replace with logging from new logging pkg,
+ // once it lands.
+ log.Printf("Failed to write JSON response: %v", err)
+ }
+}
+
+func DefaultUnhealthyHandler(w http.ResponseWriter, r *http.Request, err error) {
+ writeErr := httputil.WriteJSONResponse(w, http.StatusInternalServerError, StatusResponse{
+ Status: "error",
+ Details: &StatusResponseDetails{
+ Code: http.StatusInternalServerError,
+ Message: err.Error(),
+ },
+ })
+ if writeErr != nil {
+ // TODO(bobbyrullo): replace with logging from new logging pkg,
+ // once it lands.
+ log.Printf("Failed to write JSON response: %v", err)
+ }
+}
+
+// ExpvarHandler is copied from https://golang.org/src/expvar/expvar.go, where it's sadly unexported.
+func ExpvarHandler(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ fmt.Fprintf(w, "{\n")
+ first := true
+ expvar.Do(func(kv expvar.KeyValue) {
+ if !first {
+ fmt.Fprintf(w, ",\n")
+ }
+ first = false
+ fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+ })
+ fmt.Fprintf(w, "\n}\n")
+}
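Callers can also replace the handlers while reusing the stock JSON bodies. A sketch of a logging UnhealthyHandler; the `alwaysFailing` type and the error text are hypothetical and exist only to exercise the unhealthy path.

```go
package main

import (
	"errors"
	"log"
	"net/http"

	"github.com/coreos/pkg/health"
)

// alwaysFailing is a hypothetical Checkable used only to exercise the
// unhealthy path.
type alwaysFailing struct{}

func (alwaysFailing) Healthy() error { return errors.New("simulated outage") }

func main() {
	checker := health.Checker{
		Checks: []health.Checkable{alwaysFailing{}},
		// Log the failure, then reuse the stock JSON error response.
		UnhealthyHandler: func(w http.ResponseWriter, r *http.Request, err error) {
			log.Printf("health check failed: %v", err)
			health.DefaultUnhealthyHandler(w, r, err)
		},
	}
	http.Handle("/healthz", checker)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```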
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/httputil/README.md b/src/kube2msb/vendor/github.com/coreos/pkg/httputil/README.md
new file mode 100644
index 0000000..44fa751
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/httputil/README.md
@@ -0,0 +1,13 @@
+httputil
+====
+
+Common code for dealing with HTTP.
+
+Includes:
+
+* Code for returning JSON responses.
+
+### Documentation
+
+Visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/httputil)
+
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/httputil/cookie.go b/src/kube2msb/vendor/github.com/coreos/pkg/httputil/cookie.go
new file mode 100644
index 0000000..c37a37b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/httputil/cookie.go
@@ -0,0 +1,21 @@
+package httputil
+
+import (
+ "net/http"
+ "time"
+)
+
+// DeleteCookies effectively deletes all named cookies
+// by clearing their values and setting them to expire immediately.
+func DeleteCookies(w http.ResponseWriter, cookieNames ...string) {
+ for _, n := range cookieNames {
+ c := &http.Cookie{
+ Name: n,
+ Value: "",
+ Path: "/",
+ MaxAge: -1,
+ Expires: time.Time{},
+ }
+ http.SetCookie(w, c)
+ }
+}
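A typical caller is a logout handler that clears session state before redirecting. A sketch using the function above; the cookie names and the `/logout` route are illustrative.

```go
package main

import (
	"net/http"

	"github.com/coreos/pkg/httputil"
)

// logoutHandler clears the session cookies and redirects to the login page.
// The cookie names here are hypothetical.
func logoutHandler(w http.ResponseWriter, r *http.Request) {
	httputil.DeleteCookies(w, "session", "csrf_token")
	http.Redirect(w, r, "/login", http.StatusFound)
}

func main() {
	http.HandleFunc("/logout", logoutHandler)
	http.ListenAndServe(":8080", nil)
}
```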
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/httputil/json.go b/src/kube2msb/vendor/github.com/coreos/pkg/httputil/json.go
new file mode 100644
index 0000000..0b09235
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/httputil/json.go
@@ -0,0 +1,27 @@
+package httputil
+
+import (
+ "encoding/json"
+ "net/http"
+)
+
+const (
+ JSONContentType = "application/json"
+)
+
+func WriteJSONResponse(w http.ResponseWriter, code int, resp interface{}) error {
+ enc, err := json.Marshal(resp)
+ if err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ return err
+ }
+
+ w.Header().Set("Content-Type", JSONContentType)
+ w.WriteHeader(code)
+
+ _, err = w.Write(enc)
+ if err != nil {
+ return err
+ }
+ return nil
+}
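WriteJSONResponse marshals first, so a marshal failure results in a bare 500 before any body is written; otherwise the Content-Type header and status code are set before the encoded payload. A usage sketch; the response struct, route, and values are illustrative.

```go
package main

import (
	"net/http"

	"github.com/coreos/pkg/httputil"
)

// versionHandler is a hypothetical endpoint; WriteJSONResponse is the
// helper defined above.
func versionHandler(w http.ResponseWriter, r *http.Request) {
	resp := struct {
		Name    string `json:"name"`
		Version string `json:"version"`
	}{Name: "kube2msb", Version: "1.0.0"}

	if err := httputil.WriteJSONResponse(w, http.StatusOK, resp); err != nil {
		// The status line has already been written (or a 500 was emitted
		// on marshal failure); nothing more can be done here.
		return
	}
}

func main() {
	http.HandleFunc("/version", versionHandler)
	http.ListenAndServe(":8080", nil)
}
```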
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/timeutil/backoff.go b/src/kube2msb/vendor/github.com/coreos/pkg/timeutil/backoff.go
new file mode 100644
index 0000000..b34fb49
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/timeutil/backoff.go
@@ -0,0 +1,15 @@
+package timeutil
+
+import (
+ "time"
+)
+
+func ExpBackoff(prev, max time.Duration) time.Duration {
+ if prev == 0 {
+ return time.Second
+ }
+ if prev > max/2 {
+ return max
+ }
+ return 2 * prev
+}
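ExpBackoff doubles the previous delay, starting at one second and capping at max once the previous delay exceeds half of it. A retry-loop sketch; `flakyCall` and the 30-second cap are illustrative.

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/coreos/pkg/timeutil"
)

// flakyCall is a stand-in for an operation that fails transiently.
func flakyCall(attempt int) error {
	if attempt < 3 {
		return errors.New("temporarily unavailable")
	}
	return nil
}

func main() {
	var sleep time.Duration // zero on the first failure -> 1s
	const maxSleep = 30 * time.Second

	for attempt := 0; ; attempt++ {
		if err := flakyCall(attempt); err == nil {
			fmt.Println("succeeded after", attempt, "retries")
			return
		}
		sleep = timeutil.ExpBackoff(sleep, maxSleep) // 1s, 2s, 4s, ... capped at 30s
		time.Sleep(sleep)
	}
}
```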
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/LICENSE b/src/kube2msb/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..2a7cfd2
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,13 @@
+Copyright (c) 2012-2013 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypass.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..565bf58
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,151 @@
+// Copyright (c) 2015 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine and "-tags disableunsafe"
+// is not added to the go build command line.
+// +build !appengine,!disableunsafe
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+var (
+ // offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+ // internal reflect.Value fields. These values are valid before golang
+ // commit ecccf07e7f9d which changed the format. They are also valid
+ // after commit 82f48826c6c7 which changed the format again to mirror
+ // the original format. Code in the init function updates these offsets
+ // as necessary.
+ offsetPtr = uintptr(ptrSize)
+ offsetScalar = uintptr(0)
+ offsetFlag = uintptr(ptrSize * 2)
+
+ // flagKindWidth and flagKindShift indicate various bits that the
+ // reflect package uses internally to track kind information.
+ //
+ // flagRO indicates whether or not the value field of a reflect.Value is
+ // read-only.
+ //
+ // flagIndir indicates whether the value field of a reflect.Value is
+ // the actual data or a pointer to the data.
+ //
+ // These values are valid before golang commit 90a7c3c86944 which
+ // changed their positions. Code in the init function updates these
+ // flags as necessary.
+ flagKindWidth = uintptr(5)
+ flagKindShift = uintptr(flagKindWidth - 1)
+ flagRO = uintptr(1 << 0)
+ flagIndir = uintptr(1 << 1)
+)
+
+func init() {
+ // Older versions of reflect.Value stored small integers directly in the
+ // ptr field (which is named val in the older versions). Versions
+ // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+ // scalar for this purpose which unfortunately came before the flag
+ // field, so the offset of the flag field is different for those
+ // versions.
+ //
+ // This code constructs a new reflect.Value from a known small integer
+ // and checks if the size of the reflect.Value struct indicates it has
+ // the scalar field. When it does, the offsets are updated accordingly.
+ vv := reflect.ValueOf(0xf00)
+ if unsafe.Sizeof(vv) == (ptrSize * 4) {
+ offsetScalar = ptrSize * 2
+ offsetFlag = ptrSize * 3
+ }
+
+ // Commit 90a7c3c86944 changed the flag positions such that the low
+ // order bits are the kind. This code extracts the kind from the flags
+ // field and ensures it's the correct type. When it's not, the flag
+ // order has been changed to the newer format, so the flags are updated
+ // accordingly.
+ upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+ upfv := *(*uintptr)(upf)
+ flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+ if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+ flagKindShift = 0
+ flagRO = 1 << 5
+ flagIndir = 1 << 6
+
+ // Commit adf9b30e5594 modified the flags to separate the
+ // flagRO flag into two bits which specify whether or not the
+ // field is embedded. This causes flagIndir to move over a bit
+ // and means that flagRO is the combination of the original
+ // flagRO bit and the new bit.
+ //
+ // This code detects the change by extracting what used to be
+ // the indirect bit to ensure it's set. When it's not, the flag
+ // order has been changed to the newer format, so the flags are
+ // updated accordingly.
+ if upfv&flagIndir == 0 {
+ flagRO = 3 << 5
+ flagIndir = 1 << 7
+ }
+ }
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+ indirects := 1
+ vt := v.Type()
+ upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+ rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+ if rvf&flagIndir != 0 {
+ vt = reflect.PtrTo(v.Type())
+ indirects++
+ } else if offsetScalar != 0 {
+ // The value is in the scalar field when it's not one of the
+ // reference types.
+ switch vt.Kind() {
+ case reflect.Uintptr:
+ case reflect.Chan:
+ case reflect.Func:
+ case reflect.Map:
+ case reflect.Ptr:
+ case reflect.UnsafePointer:
+ default:
+ upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+ offsetScalar)
+ }
+ }
+
+ pv := reflect.NewAt(vt, upv)
+ rv = pv
+ for i := 0; i < indirects; i++ {
+ rv = rv.Elem()
+ }
+ return rv
+}
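The restriction this file works around is visible with plain reflect: values obtained from unexported struct fields report CanInterface() == false, so their Stringer/error methods cannot normally be reached. A stdlib-only illustration (the types are hypothetical):

```go
package main

import (
	"fmt"
	"reflect"
)

type wrapper struct {
	hidden fmt.Stringer // unexported field
}

type named string

func (n named) String() string { return string(n) }

func main() {
	w := wrapper{hidden: named("secret")}
	f := reflect.ValueOf(w).Field(0)

	// The reflect package refuses to hand out an interface for an
	// unexported field; this is exactly what unsafeReflectValue above
	// (when unsafe is available) is used to bypass.
	fmt.Println(f.CanInterface()) // false
	// f.Interface() would panic here: "cannot return value obtained
	// from unexported field or method".
}
```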
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..457e412
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,37 @@
+// Copyright (c) 2015 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when either the code is running on Google App Engine or "-tags disableunsafe"
+// is added to the go build command line.
+// +build appengine disableunsafe
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/common.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..14f02dc
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<already shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+ // mutate the value, however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+ strings []string // either nil or same len as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
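This sorting machinery is what makes map output deterministic when SortKeys is enabled. A small sketch using the public configuration; the map contents are illustrative.

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}

	// With SortKeys enabled the string keys go through valueSortLess,
	// so repeated runs produce identical, diffable output.
	spew.Config.SortKeys = true
	fmt.Print(spew.Sdump(m))
}
```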
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/config.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..ee1ab07
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance that all top-level functions use sets this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value, however,
+ // in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "disableunsafe" build tag specified.
+ DisablePointerMethods bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Sprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
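Because each ConfigState carries its own options, independent configurations can coexist alongside the global spew.Config. A minimal sketch; the `server` type and its values are illustrative.

```go
package main

import (
	"github.com/davecgh/go-spew/spew"
)

type server struct {
	addr    string
	retries int
}

func main() {
	s := server{addr: "127.0.0.1:8080", retries: 3}

	// A dedicated config with tab indentation and a depth limit, kept
	// separate from the global spew.Config used by the top-level functions.
	cfg := spew.NewDefaultConfig()
	cfg.Indent = "\t"
	cfg.MaxDepth = 2

	cfg.Dump(s)                     // multi-line dump with types
	cfg.Printf("inline: %+v\n", s)  // compact formatter-style output
}
```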
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/doc.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..5be0c40
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types are as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported, with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Sprintf. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
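For completeness, the quick-start calls described in the package documentation above as one runnable program; the `config` type and its values are illustrative.

```go
package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

type config struct {
	Name  string
	Ports []int
}

func main() {
	c := &config{Name: "kube2msb", Ports: []int{8080, 8443}}

	spew.Dump(c)                    // full multi-line dump with types and addresses
	spew.Fdump(os.Stderr, c)        // same output, to an arbitrary io.Writer
	spew.Printf("inline: %+v\n", c) // compact formatter-style output
	_ = spew.Sdump(c)               // formatted output returned as a string
}
```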
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/dump.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..a0ff95e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound == true:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound == true:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
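
As a quick illustration of the three entry points above (not part of the vendored file; the project struct below is hypothetical), Dump writes to standard out, Fdump writes to any io.Writer, and Sdump returns the same output as a string:

    package main

    import (
        "os"

        "github.com/davecgh/go-spew/spew"
    )

    type project struct {
        Name string
        Tags []string
        Meta map[string]int
    }

    func main() {
        p := project{Name: "kube2msb", Tags: []string{"k8s", "msb"}, Meta: map[string]int{"replicas": 2}}

        spew.Dump(p)             // annotated dump to standard out
        spew.Fdump(os.Stderr, p) // same output, sent to an arbitrary io.Writer
        out := spew.Sdump(p)     // same output, captured as a string
        _ = out
    }
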
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/format.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..ecf3b80
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by the fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as an argument
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function;
+// however, circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
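
A minimal usage sketch for the verb handling described above (illustrative only; the point struct is hypothetical):

    package main

    import (
        "fmt"

        "github.com/davecgh/go-spew/spew"
    )

    func main() {
        type point struct{ X, Y int }
        p := &point{X: 1, Y: 2}

        // Only the v-family verbs are handled by the custom formatter:
        // %+v adds pointer addresses and %#v adds type information.
        fmt.Printf("%v\n", spew.NewFormatter(p))
        fmt.Printf("%+v\n", spew.NewFormatter(p))
        fmt.Printf("%#+v\n", spew.NewFormatter(p))

        // Other verbs such as %q are deferred to the standard fmt package.
        fmt.Printf("%q\n", spew.NewFormatter("hello"))
    }
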
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/spew.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..d8233f5
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
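
The wrappers above are shorthand for wrapping every argument in NewFormatter yourself; a brief sketch under that assumption (the config map is hypothetical):

    package main

    import (
        "fmt"

        "github.com/davecgh/go-spew/spew"
    )

    func main() {
        cfg := map[string]interface{}{"port": 8080, "debug": true}

        // These two lines produce equivalent output.
        spew.Printf("config: %+v\n", cfg)
        fmt.Printf("config: %+v\n", spew.NewFormatter(cfg))

        // Errorf builds an error value using the same formatter.
        err := spew.Errorf("unexpected config: %#v", cfg)
        fmt.Println(err)
    }
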
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/LICENSE b/src/kube2msb/vendor/github.com/docker/distribution/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/digest/digest.go b/src/kube2msb/vendor/github.com/docker/distribution/digest/digest.go
new file mode 100644
index 0000000..31d821b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/digest/digest.go
@@ -0,0 +1,139 @@
+package digest
+
+import (
+ "fmt"
+ "hash"
+ "io"
+ "regexp"
+ "strings"
+)
+
+const (
+ // DigestSha256EmptyTar is the canonical sha256 digest of empty data
+ DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+)
+
+// Digest allows simple protection of hex formatted digest strings, prefixed
+// by their algorithm. Strings of type Digest have some guarantee of being in
+// the correct format and it provides quick access to the components of a
+// digest string.
+//
+// The following is an example of the contents of Digest types:
+//
+// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// This allows code to abstract the digest behind this type and work only in
+// those terms.
+type Digest string
+
+// NewDigest returns a Digest from alg and a hash.Hash object.
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
+ return NewDigestFromBytes(alg, h.Sum(nil))
+}
+
+// NewDigestFromBytes returns a new digest from the byte contents of p.
+// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
+// functions. This is also useful for rebuilding digests from binary
+// serializations.
+func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
+ return Digest(fmt.Sprintf("%s:%x", alg, p))
+}
+
+// NewDigestFromHex returns a Digest from alg and the hex encoded digest.
+func NewDigestFromHex(alg, hex string) Digest {
+ return Digest(fmt.Sprintf("%s:%s", alg, hex))
+}
+
+// DigestRegexp matches valid digest types.
+var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
+
+// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
+var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
+
+var (
+ // ErrDigestInvalidFormat is returned when the digest format is invalid.
+ ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
+
+ // ErrDigestInvalidLength is returned when the digest has an invalid length.
+ ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
+
+ // ErrDigestUnsupported is returned when the digest algorithm is unsupported.
+ ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
+)
+
+// ParseDigest parses s and returns the validated digest object. An error will
+// be returned if the format is invalid.
+func ParseDigest(s string) (Digest, error) {
+ d := Digest(s)
+
+ return d, d.Validate()
+}
+
+// FromReader returns the most valid digest for the underlying content using
+// the canonical digest algorithm.
+func FromReader(rd io.Reader) (Digest, error) {
+ return Canonical.FromReader(rd)
+}
+
+// FromBytes digests the input and returns a Digest.
+func FromBytes(p []byte) Digest {
+ return Canonical.FromBytes(p)
+}
+
+// Validate checks that the contents of d is a valid digest, returning an
+// error if not.
+func (d Digest) Validate() error {
+ s := string(d)
+
+ if !DigestRegexpAnchored.MatchString(s) {
+ return ErrDigestInvalidFormat
+ }
+
+ i := strings.Index(s, ":")
+ if i < 0 {
+ return ErrDigestInvalidFormat
+ }
+
+ // case: "sha256:" with no hex.
+ if i+1 == len(s) {
+ return ErrDigestInvalidFormat
+ }
+
+ switch algorithm := Algorithm(s[:i]); algorithm {
+ case SHA256, SHA384, SHA512:
+ if algorithm.Size()*2 != len(s[i+1:]) {
+ return ErrDigestInvalidLength
+ }
+ break
+ default:
+ return ErrDigestUnsupported
+ }
+
+ return nil
+}
+
+// Algorithm returns the algorithm portion of the digest. This will panic if
+// the underlying digest is not in a valid format.
+func (d Digest) Algorithm() Algorithm {
+ return Algorithm(d[:d.sepIndex()])
+}
+
+// Hex returns the hex digest portion of the digest. This will panic if the
+// underlying digest is not in a valid format.
+func (d Digest) Hex() string {
+ return string(d[d.sepIndex()+1:])
+}
+
+func (d Digest) String() string {
+ return string(d)
+}
+
+func (d Digest) sepIndex() int {
+ i := strings.Index(string(d), ":")
+
+ if i < 0 {
+ panic("could not find ':' in digest: " + d)
+ }
+
+ return i
+}
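
For orientation, a small sketch of parsing and inspecting a digest with the functions above (the input is the package's own DigestSha256EmptyTar constant, used here only as an example value):

    package main

    import (
        "fmt"

        "github.com/docker/distribution/digest"
    )

    func main() {
        d, err := digest.ParseDigest(digest.DigestSha256EmptyTar)
        if err != nil {
            panic(err) // ParseDigest validates the "algorithm:hex" format
        }

        fmt.Println(d.Algorithm()) // sha256
        fmt.Println(d.Hex())       // hex portion after the colon
    }
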
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/digest/digester.go b/src/kube2msb/vendor/github.com/docker/distribution/digest/digester.go
new file mode 100644
index 0000000..f3105a4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/digest/digester.go
@@ -0,0 +1,155 @@
+package digest
+
+import (
+ "crypto"
+ "fmt"
+ "hash"
+ "io"
+)
+
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
+type Algorithm string
+
+// supported digest types
+const (
+ SHA256 Algorithm = "sha256" // sha256 with hex encoding
+ SHA384 Algorithm = "sha384" // sha384 with hex encoding
+ SHA512 Algorithm = "sha512" // sha512 with hex encoding
+
+ // Canonical is the primary digest algorithm used with the distribution
+ // project. Other digests may be used but this one is the primary storage
+ // digest.
+ Canonical = SHA256
+)
+
+var (
+ // TODO(stevvooe): Follow the pattern of the standard crypto package for
+ // registration of digests. Effectively, we are a registerable set and
+ // common symbol access.
+
+ // algorithms maps values to hash.Hash implementations. Other algorithms
+ // may be available but they cannot be calculated by the digest package.
+ algorithms = map[Algorithm]crypto.Hash{
+ SHA256: crypto.SHA256,
+ SHA384: crypto.SHA384,
+ SHA512: crypto.SHA512,
+ }
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, calls to New and Hash will panic.
+func (a Algorithm) Available() bool {
+ h, ok := algorithms[a]
+ if !ok {
+ return false
+ }
+
+ // check availability of the hash, as well
+ return h.Available()
+}
+
+func (a Algorithm) String() string {
+ return string(a)
+}
+
+// Size returns number of bytes returned by the hash.
+func (a Algorithm) Size() int {
+ h, ok := algorithms[a]
+ if !ok {
+ return 0
+ }
+ return h.Size()
+}
+
+// Set is implemented to allow use of Algorithm as a command line flag.
+func (a *Algorithm) Set(value string) error {
+ if value == "" {
+ *a = Canonical
+ } else {
+ // just do a type conversion, support is queried with Available.
+ *a = Algorithm(value)
+ }
+
+ return nil
+}
+
+// New returns a new digester for the specified algorithm. If the algorithm
+// does not have a digester implementation, nil will be returned. This can be
+// checked by calling Available before calling New.
+func (a Algorithm) New() Digester {
+ return &digester{
+ alg: a,
+ hash: a.Hash(),
+ }
+}
+
+// Hash returns a new hash as used by the algorithm. If not available, the
+// method will panic. Check Algorithm.Available() before calling.
+func (a Algorithm) Hash() hash.Hash {
+ if !a.Available() {
+ // NOTE(stevvooe): A missing hash is usually a programming error that
+ // must be resolved at compile time. Hash implementations are not
+ // imported by the digest package so that users can choose their own (such as
+ // when using stevvooe/resumable or a hardware accelerated package).
+ //
+ // Applications that may want to resolve the hash at runtime should
+ // call Algorithm.Available before calling Algorithm.Hash().
+ panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
+ }
+
+ return algorithms[a].New()
+}
+
+// FromReader returns the digest of the reader using the algorithm.
+func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
+ digester := a.New()
+
+ if _, err := io.Copy(digester.Hash(), rd); err != nil {
+ return "", err
+ }
+
+ return digester.Digest(), nil
+}
+
+// FromBytes digests the input and returns a Digest.
+func (a Algorithm) FromBytes(p []byte) Digest {
+ digester := a.New()
+
+ if _, err := digester.Hash().Write(p); err != nil {
+ // Writes to a Hash should never fail. None of the existing
+ // hash implementations in the stdlib or hashes vendored
+ // here can return errors from Write. Having a panic in this
+ // condition instead of having FromBytes return an error value
+ // avoids unnecessary error handling paths in all callers.
+ panic("write to hash function returned error: " + err.Error())
+ }
+
+ return digester.Digest()
+}
+
+// TODO(stevvooe): Allow resolution of verifiers using the digest type and
+// this registration system.
+
+// Digester calculates the digest of written data. Writes should go directly
+// to the return value of Hash, while calling Digest will return the current
+// value of the digest.
+type Digester interface {
+ Hash() hash.Hash // provides direct access to underlying hash instance.
+ Digest() Digest
+}
+
+// digester provides a simple digester definition that embeds a hasher.
+type digester struct {
+ alg Algorithm
+ hash hash.Hash
+}
+
+func (d *digester) Hash() hash.Hash {
+ return d.hash
+}
+
+func (d *digester) Digest() Digest {
+ return NewDigest(d.alg, d.hash)
+}
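
A short sketch of the two ways to compute a digest described above, streaming through a Digester and via the one-shot helpers (the inputs are illustrative):

    package main

    import (
        "fmt"
        "io"
        "strings"

        "github.com/docker/distribution/digest"
    )

    func main() {
        // Streaming: write into the digester's hash, then read the digest.
        digester := digest.Canonical.New()
        if _, err := io.Copy(digester.Hash(), strings.NewReader("hello")); err != nil {
            panic(err)
        }
        fmt.Println(digester.Digest())

        // One-shot helpers over a reader and a byte slice give the same value.
        d1, _ := digest.FromReader(strings.NewReader("hello"))
        d2 := digest.FromBytes([]byte("hello"))
        fmt.Println(d1 == d2) // true
    }
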
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/digest/doc.go b/src/kube2msb/vendor/github.com/docker/distribution/digest/doc.go
new file mode 100644
index 0000000..f64b0db
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/digest/doc.go
@@ -0,0 +1,42 @@
+// Package digest provides a generalized type to opaquely represent message
+// digests and their operations within the registry. The Digest type is
+// designed to serve as a flexible identifier in a content-addressable system.
+// More importantly, it provides tools and wrappers to work with
+// hash.Hash-based digests with little effort.
+//
+// Basics
+//
+// The format of a digest is simply a string with two parts, dubbed the
+// "algorithm" and the "digest", separated by a colon:
+//
+// <algorithm>:<digest>
+//
+// An example of a sha256 digest representation follows:
+//
+// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// In this case, the string "sha256" is the algorithm and the hex bytes are
+// the "digest".
+//
+// Because the Digest type is simply a string, once a valid Digest is
+// obtained, comparisons are cheap, quick and simple to express with the
+// standard equality operator.
+//
+// Verification
+//
+// The main benefit of using the Digest type is simple verification against a
+// given digest. The Verifier interface, modeled after the stdlib hash.Hash
+// interface, provides a common write sink for digest verification. After
+// writing is complete, calling the Verifier.Verified method will indicate
+// whether or not the stream of bytes matches the target digest.
+//
+// Missing Features
+//
+// In addition to the above, we intend to add the following features to this
+// package:
+//
+// 1. A Digester type that supports write sink digest calculation.
+//
+// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
+//
+package digest
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/digest/set.go b/src/kube2msb/vendor/github.com/docker/distribution/digest/set.go
new file mode 100644
index 0000000..4b9313c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/digest/set.go
@@ -0,0 +1,245 @@
+package digest
+
+import (
+ "errors"
+ "sort"
+ "strings"
+ "sync"
+)
+
+var (
+ // ErrDigestNotFound is used when a matching digest
+ // could not be found in a set.
+ ErrDigestNotFound = errors.New("digest not found")
+
+ // ErrDigestAmbiguous is used when multiple digests
+ // are found in a set. None of the matching digests
+ // should be considered valid matches.
+ ErrDigestAmbiguous = errors.New("ambiguous digest string")
+)
+
+// Set is used to hold a unique set of digests which
+// may be easily referenced by a string representation of the digest
+// as well as a short representation.
+// The uniqueness of the short representation is based on other
+// digests in the set. If digests are omitted from this set,
+// collisions in a larger set may not be detected, therefore it
+// is important to always do short representation lookups on
+// the complete set of digests. To mitigate collisions, an
+// appropriately long short code should be used.
+type Set struct {
+ mutex sync.RWMutex
+ entries digestEntries
+}
+
+// NewSet creates an empty set of digests
+// which may have digests added.
+func NewSet() *Set {
+ return &Set{
+ entries: digestEntries{},
+ }
+}
+
+// checkShortMatch checks whether two digests match as either whole
+// values or short values. This function does not test equality,
+// rather whether the second value could match against the first
+// value.
+func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
+ if len(hex) == len(shortHex) {
+ if hex != shortHex {
+ return false
+ }
+ if len(shortAlg) > 0 && string(alg) != shortAlg {
+ return false
+ }
+ } else if !strings.HasPrefix(hex, shortHex) {
+ return false
+ } else if len(shortAlg) > 0 && string(alg) != shortAlg {
+ return false
+ }
+ return true
+}
+
+// Lookup looks for a digest matching the given string representation.
+// If no digests could be found ErrDigestNotFound will be returned
+// with an empty digest value. If multiple matches are found
+// ErrDigestAmbiguous will be returned with an empty digest value.
+func (dst *Set) Lookup(d string) (Digest, error) {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ if len(dst.entries) == 0 {
+ return "", ErrDigestNotFound
+ }
+ var (
+ searchFunc func(int) bool
+ alg Algorithm
+ hex string
+ )
+ dgst, err := ParseDigest(d)
+ if err == ErrDigestInvalidFormat {
+ hex = d
+ searchFunc = func(i int) bool {
+ return dst.entries[i].val >= d
+ }
+ } else {
+ hex = dgst.Hex()
+ alg = dgst.Algorithm()
+ searchFunc = func(i int) bool {
+ if dst.entries[i].val == hex {
+ return dst.entries[i].alg >= alg
+ }
+ return dst.entries[i].val >= hex
+ }
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
+ return "", ErrDigestNotFound
+ }
+ if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
+ return dst.entries[idx].digest, nil
+ }
+ if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
+ return "", ErrDigestAmbiguous
+ }
+
+ return dst.entries[idx].digest, nil
+}
+
+// Add adds the given digest to the set. An error will be returned
+// if the given digest is invalid. If the digest already exists in the
+// set, this operation will be a no-op.
+func (dst *Set) Add(d Digest) error {
+ if err := d.Validate(); err != nil {
+ return err
+ }
+ dst.mutex.Lock()
+ defer dst.mutex.Unlock()
+ entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+ searchFunc := func(i int) bool {
+ if dst.entries[i].val == entry.val {
+ return dst.entries[i].alg >= entry.alg
+ }
+ return dst.entries[i].val >= entry.val
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ if idx == len(dst.entries) {
+ dst.entries = append(dst.entries, entry)
+ return nil
+ } else if dst.entries[idx].digest == d {
+ return nil
+ }
+
+ entries := append(dst.entries, nil)
+ copy(entries[idx+1:], entries[idx:len(entries)-1])
+ entries[idx] = entry
+ dst.entries = entries
+ return nil
+}
+
+// Remove removes the given digest from the set. An error will be
+// returned if the given digest is invalid. If the digest does
+// not exist in the set, this operation will be a no-op.
+func (dst *Set) Remove(d Digest) error {
+ if err := d.Validate(); err != nil {
+ return err
+ }
+ dst.mutex.Lock()
+ defer dst.mutex.Unlock()
+ entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+ searchFunc := func(i int) bool {
+ if dst.entries[i].val == entry.val {
+ return dst.entries[i].alg >= entry.alg
+ }
+ return dst.entries[i].val >= entry.val
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ // Not found if idx is after or value at idx is not digest
+ if idx == len(dst.entries) || dst.entries[idx].digest != d {
+ return nil
+ }
+
+ entries := dst.entries
+ copy(entries[idx:], entries[idx+1:])
+ entries = entries[:len(entries)-1]
+ dst.entries = entries
+
+ return nil
+}
+
+// All returns all the digests in the set
+func (dst *Set) All() []Digest {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ retValues := make([]Digest, len(dst.entries))
+ for i := range dst.entries {
+ retValues[i] = dst.entries[i].digest
+ }
+
+ return retValues
+}
+
+// ShortCodeTable returns a map of Digest to unique short codes. The
+// length parameter is the minimum length; the maximum length may be the
+// entire value of the digest if uniqueness cannot be achieved without the
+// full value. This function will attempt to make short codes as short
+// as possible to be unique.
+func ShortCodeTable(dst *Set, length int) map[Digest]string {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ m := make(map[Digest]string, len(dst.entries))
+ l := length
+ resetIdx := 0
+ for i := 0; i < len(dst.entries); i++ {
+ var short string
+ extended := true
+ for extended {
+ extended = false
+ if len(dst.entries[i].val) <= l {
+ short = dst.entries[i].digest.String()
+ } else {
+ short = dst.entries[i].val[:l]
+ for j := i + 1; j < len(dst.entries); j++ {
+ if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
+ if j > resetIdx {
+ resetIdx = j
+ }
+ extended = true
+ } else {
+ break
+ }
+ }
+ if extended {
+ l++
+ }
+ }
+ }
+ m[dst.entries[i].digest] = short
+ if i >= resetIdx {
+ l = length
+ }
+ }
+ return m
+}
+
+type digestEntry struct {
+ alg Algorithm
+ val string
+ digest Digest
+}
+
+type digestEntries []*digestEntry
+
+func (d digestEntries) Len() int {
+ return len(d)
+}
+
+func (d digestEntries) Less(i, j int) bool {
+ if d[i].val != d[j].val {
+ return d[i].val < d[j].val
+ }
+ return d[i].alg < d[j].alg
+}
+
+func (d digestEntries) Swap(i, j int) {
+ d[i], d[j] = d[j], d[i]
+}
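
A brief sketch of the Set behavior documented above, using a short-prefix lookup and ShortCodeTable (the payloads are illustrative):

    package main

    import (
        "fmt"

        "github.com/docker/distribution/digest"
    )

    func main() {
        set := digest.NewSet()
        d1 := digest.FromBytes([]byte("layer-1"))
        d2 := digest.FromBytes([]byte("layer-2"))
        if err := set.Add(d1); err != nil {
            panic(err)
        }
        if err := set.Add(d2); err != nil {
            panic(err)
        }

        // Look up by a short hex prefix; ambiguous prefixes yield ErrDigestAmbiguous.
        match, err := set.Lookup(d1.Hex()[:12])
        fmt.Println(match, err)

        // Shortest unique codes, at least 8 characters long.
        for dgst, short := range digest.ShortCodeTable(set, 8) {
            fmt.Println(short, "=>", dgst)
        }
    }
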
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/digest/verifiers.go b/src/kube2msb/vendor/github.com/docker/distribution/digest/verifiers.go
new file mode 100644
index 0000000..9af3be1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/digest/verifiers.go
@@ -0,0 +1,44 @@
+package digest
+
+import (
+ "hash"
+ "io"
+)
+
+// Verifier presents a general verification interface to be used with message
+// digests and other byte stream verifications. Users instantiate a Verifier
+// from one of the various methods, write the data under test to it then check
+// the result with the Verified method.
+type Verifier interface {
+ io.Writer
+
+ // Verified will return true if the content written to Verifier matches
+ // the digest.
+ Verified() bool
+}
+
+// NewDigestVerifier returns a verifier that compares the written bytes
+// against a passed in digest.
+func NewDigestVerifier(d Digest) (Verifier, error) {
+ if err := d.Validate(); err != nil {
+ return nil, err
+ }
+
+ return hashVerifier{
+ hash: d.Algorithm().Hash(),
+ digest: d,
+ }, nil
+}
+
+type hashVerifier struct {
+ digest Digest
+ hash hash.Hash
+}
+
+func (hv hashVerifier) Write(p []byte) (n int, err error) {
+ return hv.hash.Write(p)
+}
+
+func (hv hashVerifier) Verified() bool {
+ return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
+}
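
A minimal sketch of the verification flow described above (the payload is illustrative):

    package main

    import (
        "fmt"
        "io"
        "strings"

        "github.com/docker/distribution/digest"
    )

    func main() {
        payload := "some blob content"
        expected := digest.FromBytes([]byte(payload))

        verifier, err := digest.NewDigestVerifier(expected)
        if err != nil {
            panic(err)
        }

        // Stream the data under test into the verifier, then check the result.
        if _, err := io.Copy(verifier, strings.NewReader(payload)); err != nil {
            panic(err)
        }
        fmt.Println(verifier.Verified()) // true when the content matches the digest
    }
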
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/reference/reference.go b/src/kube2msb/vendor/github.com/docker/distribution/reference/reference.go
new file mode 100644
index 0000000..bb09fa2
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/reference/reference.go
@@ -0,0 +1,334 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+// reference := name [ ":" tag ] [ "@" digest ]
+// name := [hostname '/'] component ['/' component]*
+// hostname := hostcomponent ['.' hostcomponent]* [':' port-number]
+// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+// port-number := /[0-9]+/
+// component := alpha-numeric [separator alpha-numeric]*
+// alpha-numeric := /[a-z0-9]+/
+// separator := /[_.]|__|[-]*/
+//
+// tag := /[\w][\w.-]{0,127}/
+//
+// digest := digest-algorithm ":" digest-hex
+// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
+// digest-algorithm-separator := /[+.-_]/
+// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+package reference
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/docker/distribution/digest"
+)
+
+const (
+ // NameTotalLengthMax is the maximum total number of characters in a repository name.
+ NameTotalLengthMax = 255
+)
+
+var (
+ // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+ ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+ // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrTagInvalidFormat = errors.New("invalid tag format")
+
+ // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+ // ErrNameEmpty is returned for empty, invalid repository names.
+ ErrNameEmpty = errors.New("repository name must have at least one component")
+
+ // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+ // String returns the full reference
+ String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+ reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+ return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+ return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+ return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+ r, err := Parse(string(p))
+ if err != nil {
+ return err
+ }
+
+ f.reference = r
+ return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+ Reference
+ Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+ Reference
+ Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+ Named
+ Tag() string
+}
+
+// Digested is an object which has a digest
+// in which it can be referenced by
+type Digested interface {
+ Reference
+ Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name including a name with hostname and digest
+type Canonical interface {
+ Named
+ Digest() digest.Digest
+}
+
+// SplitHostname splits a named reference into a
+// hostname and name string. If no valid hostname is
+// found, the hostname is empty and the full value
+// is returned as name
+func SplitHostname(named Named) (string, string) {
+ name := named.Name()
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if match == nil || len(match) != 3 {
+ return "", name
+ }
+ return match[1], match[2]
+}
+
+// Parse parses s and returns a syntactically valid Reference.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: Parse will not handle short digests.
+func Parse(s string) (Reference, error) {
+ matches := ReferenceRegexp.FindStringSubmatch(s)
+ if matches == nil {
+ if s == "" {
+ return nil, ErrNameEmpty
+ }
+ // TODO(dmcgowan): Provide more specific and helpful error
+ return nil, ErrReferenceInvalidFormat
+ }
+
+ if len(matches[1]) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ ref := reference{
+ name: matches[1],
+ tag: matches[2],
+ }
+ if matches[3] != "" {
+ var err error
+ ref.digest, err = digest.ParseDigest(matches[3])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ r := getBestReferenceType(ref)
+ if r == nil {
+ return nil, ErrNameEmpty
+ }
+
+ return r, nil
+}
+
+// ParseNamed parses s and returns a syntactically valid reference implementing
+// the Named interface. The reference must have a name, otherwise an error is
+// returned.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: ParseNamed will not handle short digests.
+func ParseNamed(s string) (Named, error) {
+ ref, err := Parse(s)
+ if err != nil {
+ return nil, err
+ }
+ named, isNamed := ref.(Named)
+ if !isNamed {
+ return nil, fmt.Errorf("reference %s has no name", ref.String())
+ }
+ return named, nil
+}
+
+// WithName returns a named object representing the given string. If the input
+// is invalid ErrReferenceInvalidFormat will be returned.
+func WithName(name string) (Named, error) {
+ if len(name) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+ if !anchoredNameRegexp.MatchString(name) {
+ return nil, ErrReferenceInvalidFormat
+ }
+ return repository(name), nil
+}
+
+// WithTag combines the name from "name" and the tag from "tag" to form a
+// reference incorporating both the name and the tag.
+func WithTag(name Named, tag string) (NamedTagged, error) {
+ if !anchoredTagRegexp.MatchString(tag) {
+ return nil, ErrTagInvalidFormat
+ }
+ return taggedReference{
+ name: name.Name(),
+ tag: tag,
+ }, nil
+}
+
+// WithDigest combines the name from "name" and the digest from "digest" to form
+// a reference incorporating both the name and the digest.
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
+ if !anchoredDigestRegexp.MatchString(digest.String()) {
+ return nil, ErrDigestInvalidFormat
+ }
+ return canonicalReference{
+ name: name.Name(),
+ digest: digest,
+ }, nil
+}
+
+func getBestReferenceType(ref reference) Reference {
+ if ref.name == "" {
+ // Allow digest only references
+ if ref.digest != "" {
+ return digestReference(ref.digest)
+ }
+ return nil
+ }
+ if ref.tag == "" {
+ if ref.digest != "" {
+ return canonicalReference{
+ name: ref.name,
+ digest: ref.digest,
+ }
+ }
+ return repository(ref.name)
+ }
+ if ref.digest == "" {
+ return taggedReference{
+ name: ref.name,
+ tag: ref.tag,
+ }
+ }
+
+ return ref
+}
+
+type reference struct {
+ name string
+ tag string
+ digest digest.Digest
+}
+
+func (r reference) String() string {
+ return r.name + ":" + r.tag + "@" + r.digest.String()
+}
+
+func (r reference) Name() string {
+ return r.name
+}
+
+func (r reference) Tag() string {
+ return r.tag
+}
+
+func (r reference) Digest() digest.Digest {
+ return r.digest
+}
+
+type repository string
+
+func (r repository) String() string {
+ return string(r)
+}
+
+func (r repository) Name() string {
+ return string(r)
+}
+
+type digestReference digest.Digest
+
+func (d digestReference) String() string {
+	return string(d)
+}
+
+func (d digestReference) Digest() digest.Digest {
+ return digest.Digest(d)
+}
+
+type taggedReference struct {
+ name string
+ tag string
+}
+
+func (t taggedReference) String() string {
+ return t.name + ":" + t.tag
+}
+
+func (t taggedReference) Name() string {
+ return t.name
+}
+
+func (t taggedReference) Tag() string {
+ return t.tag
+}
+
+type canonicalReference struct {
+ name string
+ digest digest.Digest
+}
+
+func (c canonicalReference) String() string {
+ return c.name + "@" + c.digest.String()
+}
+
+func (c canonicalReference) Name() string {
+ return c.name
+}
+
+func (c canonicalReference) Digest() digest.Digest {
+ return c.digest
+}
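
A minimal usage sketch of the package above (not part of the vendored file); the import path is the one shown in the diff header, and the repository names are illustrative:

```
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Parse a tagged reference; the returned value implements Named and Tagged.
	ref, err := reference.ParseNamed("docker.io/library/ubuntu:14.04")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.Name()) // docker.io/library/ubuntu

	// Split the registry hostname from the remainder of the repository name.
	hostname, remainder := reference.SplitHostname(ref)
	fmt.Println(hostname, remainder) // docker.io library/ubuntu

	// Build a reference programmatically instead of parsing one.
	named, err := reference.WithName("library/redis")
	if err != nil {
		panic(err)
	}
	tagged, err := reference.WithTag(named, "3.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(tagged.String()) // library/redis:3.0
}
```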
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/reference/regexp.go b/src/kube2msb/vendor/github.com/docker/distribution/reference/regexp.go
new file mode 100644
index 0000000..9a7d366
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/reference/regexp.go
@@ -0,0 +1,124 @@
+package reference
+
+import "regexp"
+
+var (
+ // alphaNumericRegexp defines the alpha numeric atom, typically a
+ // component of names. This only allows lower case characters and digits.
+ alphaNumericRegexp = match(`[a-z0-9]+`)
+
+ // separatorRegexp defines the separators allowed to be embedded in name
+ // components. This allow one period, one or two underscore and multiple
+ // dashes.
+ separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+ // nameComponentRegexp restricts registry path component names to start
+ // with at least one letter or number, with following parts able to be
+ // separated by one period, one or two underscore and multiple dashes.
+ nameComponentRegexp = expression(
+ alphaNumericRegexp,
+ optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+ // hostnameComponentRegexp restricts the registry hostname component of a
+ // repository name to start with a component as defined by hostnameRegexp
+ // and followed by an optional port.
+ hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+ // hostnameRegexp defines the structure of potential hostname components
+ // that may be part of image names. This is purposely a subset of what is
+ // allowed by DNS to ensure backwards compatibility with Docker image
+ // names.
+ hostnameRegexp = expression(
+ hostnameComponentRegexp,
+ optional(repeated(literal(`.`), hostnameComponentRegexp)),
+ optional(literal(`:`), match(`[0-9]+`)))
+
+ // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+ TagRegexp = match(`[\w][\w.-]{0,127}`)
+
+ // anchoredTagRegexp matches valid tag names, anchored at the start and
+ // end of the matched string.
+ anchoredTagRegexp = anchored(TagRegexp)
+
+ // DigestRegexp matches valid digests.
+ DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
+
+ // anchoredDigestRegexp matches valid digests, anchored at the start and
+ // end of the matched string.
+ anchoredDigestRegexp = anchored(DigestRegexp)
+
+ // NameRegexp is the format for the name component of references. The
+ // regexp has capturing groups for the hostname and name part omitting
+ // the separating forward slash from either.
+ NameRegexp = expression(
+ optional(hostnameRegexp, literal(`/`)),
+ nameComponentRegexp,
+ optional(repeated(literal(`/`), nameComponentRegexp)))
+
+ // anchoredNameRegexp is used to parse a name value, capturing the
+ // hostname and trailing components.
+ anchoredNameRegexp = anchored(
+ optional(capture(hostnameRegexp), literal(`/`)),
+ capture(nameComponentRegexp,
+ optional(repeated(literal(`/`), nameComponentRegexp))))
+
+ // ReferenceRegexp is the full supported format of a reference. The regexp
+ // is anchored and has capturing groups for name, tag, and digest
+ // components.
+ ReferenceRegexp = anchored(capture(NameRegexp),
+ optional(literal(":"), capture(TagRegexp)),
+ optional(literal("@"), capture(DigestRegexp)))
+)
+
+// match compiles the string to a regular expression.
+var match = regexp.MustCompile
+
+// literal compiles s into a literal regular expression, escaping any regexp
+// reserved characters.
+func literal(s string) *regexp.Regexp {
+ re := match(regexp.QuoteMeta(s))
+
+ if _, complete := re.LiteralPrefix(); !complete {
+ panic("must be a literal")
+ }
+
+ return re
+}
+
+// expression defines a full expression, where each regular expression must
+// follow the previous.
+func expression(res ...*regexp.Regexp) *regexp.Regexp {
+ var s string
+ for _, re := range res {
+ s += re.String()
+ }
+
+ return match(s)
+}
+
+// optional wraps the expression in a non-capturing group and makes the
+// production optional.
+func optional(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(group(expression(res...)).String() + `?`)
+}
+
+// repeated wraps the regexp in a non-capturing group to get one or more
+// matches.
+func repeated(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(group(expression(res...)).String() + `+`)
+}
+
+// group wraps the regexp in a non-capturing group.
+func group(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`(?:` + expression(res...).String() + `)`)
+}
+
+// capture wraps the expression in a capturing group.
+func capture(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`(` + expression(res...).String() + `)`)
+}
+
+// anchored anchors the regular expression by adding start and end delimiters.
+func anchored(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`^` + expression(res...).String() + `$`)
+}
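
A short sketch (not part of the vendored file) of how ReferenceRegexp's capture groups line up with the name, tag, and digest productions of the grammar in reference.go; the sample reference string is made up:

```
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	s := "registry.example.com:5000/app/web:v1.2@sha256:" +
		"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	m := reference.ReferenceRegexp.FindStringSubmatch(s)
	if m == nil {
		fmt.Println("no match")
		return
	}
	fmt.Println("name:  ", m[1]) // registry.example.com:5000/app/web
	fmt.Println("tag:   ", m[2]) // v1.2
	fmt.Println("digest:", m[3]) // sha256:e3b0...b855
}
```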
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/CONTRIBUTING.md b/src/kube2msb/vendor/github.com/docker/go-units/CONTRIBUTING.md
new file mode 100644
index 0000000..9ea86d7
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# Contributing to go-units
+
+Want to hack on go-units? Awesome! Here are instructions to get you started.
+
+go-units is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read Docker's
+[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
+[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
+[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
+[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/LICENSE.code b/src/kube2msb/vendor/github.com/docker/go-units/LICENSE.code
new file mode 100644
index 0000000..b55b37b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/LICENSE.code
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/LICENSE.docs b/src/kube2msb/vendor/github.com/docker/go-units/LICENSE.docs
new file mode 100644
index 0000000..e26cd4f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/LICENSE.docs
@@ -0,0 +1,425 @@
+Attribution-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+ respect those requests where reasonable. More_considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution-ShareAlike 4.0 International Public
+License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution-ShareAlike 4.0 International Public License ("Public
+License"). To the extent this Public License may be interpreted as a
+contract, You are granted the Licensed Rights in consideration of Your
+acceptance of these terms and conditions, and the Licensor grants You
+such rights in consideration of benefits the Licensor receives from
+making the Licensed Material available under these terms and
+conditions.
+
+
+Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. BY-SA Compatible License means a license listed at
+ creativecommons.org/compatiblelicenses, approved by Creative
+ Commons as essentially the equivalent of this Public License.
+
+ d. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+
+ e. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ f. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ g. License Elements means the license attributes listed in the name
+ of a Creative Commons Public License. The License Elements of this
+ Public License are Attribution and ShareAlike.
+
+ h. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ i. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ j. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ k. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ l. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ m. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part; and
+
+ b. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. Additional offer from the Licensor -- Adapted Material.
+ Every recipient of Adapted Material from You
+ automatically receives an offer from the Licensor to
+ exercise the Licensed Rights in the Adapted Material
+ under the conditions of the Adapter's License You apply.
+
+ c. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ b. ShareAlike.
+
+ In addition to the conditions in Section 3(a), if You Share
+ Adapted Material You produce, the following conditions also apply.
+
+ 1. The Adapter's License You apply must be a Creative Commons
+ license with the same License Elements, this version or
+ later, or a BY-SA Compatible License.
+
+ 2. You must include the text of, or the URI or hyperlink to, the
+ Adapter's License You apply. You may satisfy this condition
+ in any reasonable manner based on the medium, means, and
+ context in which You Share Adapted Material.
+
+ 3. You may not offer or impose any additional or different terms
+ or conditions on, or apply any Effective Technological
+ Measures to, Adapted Material that restrict exercise of the
+ rights granted under the Adapter's License You apply.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+ Rights (but not its individual contents) is Adapted Material,
+     including for purposes of Section 3(b); and
+
+ c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public licenses.
+Notwithstanding, Creative Commons may elect to apply one of its public
+licenses to material it publishes and in those instances will be
+considered the "Licensor." Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/MAINTAINERS b/src/kube2msb/vendor/github.com/docker/go-units/MAINTAINERS
new file mode 100644
index 0000000..477be8b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/MAINTAINERS
@@ -0,0 +1,27 @@
+# go-units maintainers file
+#
+# This file describes who runs the docker/go-units project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+ [Org."Core maintainers"]
+ people = [
+ "calavera",
+ ]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+ # ADD YOURSELF HERE IN ALPHABETICAL ORDER
+ [people.calavera]
+ Name = "David Calavera"
+ Email = "david.calavera@gmail.com"
+ GitHub = "calavera"
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/README.md b/src/kube2msb/vendor/github.com/docker/go-units/README.md
new file mode 100644
index 0000000..3ce4d79
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/README.md
@@ -0,0 +1,18 @@
+[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units)
+
+# Introduction
+
+go-units is a library to transform human friendly measurements into machine friendly values.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
+
+## Copyright and license
+
+Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code
+is released under the Apache 2.0 license. The README.md file, and files in the
+"docs" folder are licensed under the Creative Commons Attribution 4.0
+International License under the terms and conditions set forth in the file
+"LICENSE.docs". You may obtain a duplicate copy of the same license, titled
+CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/circle.yml b/src/kube2msb/vendor/github.com/docker/go-units/circle.yml
new file mode 100644
index 0000000..9043b35
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/circle.yml
@@ -0,0 +1,11 @@
+dependencies:
+ post:
+ # install golint
+ - go get github.com/golang/lint/golint
+
+test:
+ pre:
+ # run analysis before tests
+ - go vet ./...
+ - test -z "$(golint ./... | tee /dev/stderr)"
+ - test -z "$(gofmt -s -l . | tee /dev/stderr)"
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/duration.go b/src/kube2msb/vendor/github.com/docker/go-units/duration.go
new file mode 100644
index 0000000..c219a8a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/duration.go
@@ -0,0 +1,33 @@
+// Package units provides helper function to parse and print size and time units
+// in human-readable format.
+package units
+
+import (
+ "fmt"
+ "time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (eg. "About a minute", "4 hours ago", etc.).
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 60 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours()); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*3 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%d years", int(d.Hours())/24/365)
+}
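
A brief sketch (not part of the vendored file) of the cut-offs HumanDuration uses, assuming the vendored import path; note the package itself is named units:

```
package main

import (
	"fmt"
	"time"

	units "github.com/docker/go-units"
)

func main() {
	fmt.Println(units.HumanDuration(47 * time.Second))    // 47 seconds
	fmt.Println(units.HumanDuration(90 * time.Minute))    // About an hour
	fmt.Println(units.HumanDuration(72 * time.Hour))      // 3 days
	fmt.Println(units.HumanDuration(40 * 24 * time.Hour)) // 5 weeks
}
```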
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/size.go b/src/kube2msb/vendor/github.com/docker/go-units/size.go
new file mode 100644
index 0000000..3b59daf
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/size.go
@@ -0,0 +1,95 @@
+package units
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// See: http://en.wikipedia.org/wiki/Binary_prefix
+const (
+ // Decimal
+
+ KB = 1000
+ MB = 1000 * KB
+ GB = 1000 * MB
+ TB = 1000 * GB
+ PB = 1000 * TB
+
+ // Binary
+
+ KiB = 1024
+ MiB = 1024 * KiB
+ GiB = 1024 * MiB
+ TiB = 1024 * GiB
+ PiB = 1024 * TiB
+)
+
+type unitMap map[string]int64
+
+var (
+ decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
+ binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
+ sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`)
+)
+
+var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+
+// CustomSize returns a human-readable approximation of a size
+// using custom format.
+func CustomSize(format string, size float64, base float64, _map []string) string {
+ i := 0
+ for size >= base {
+ size = size / base
+ i++
+ }
+ return fmt.Sprintf(format, size, _map[i])
+}
+
+// HumanSize returns a human-readable approximation of a size
+// capped at 4 significant digits (eg. "2.746 MB", "796 KB").
+func HumanSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (eg. "44 KiB", "17 MiB").
+func BytesSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
+}
+
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using SI standard (eg. "44kB", "17MB").
+func FromHumanSize(size string) (int64, error) {
+ return parseSize(size, decimalMap)
+}
+
+// RAMInBytes parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (int64, error) {
+ return parseSize(size, binaryMap)
+}
+
+// Parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+ matches := sizeRegex.FindStringSubmatch(sizeStr)
+ if len(matches) != 3 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ size, err := strconv.ParseInt(matches[1], 10, 0)
+ if err != nil {
+ return -1, err
+ }
+
+ unitPrefix := strings.ToLower(matches[2])
+ if mul, ok := uMap[unitPrefix]; ok {
+ size *= mul
+ }
+
+ return size, nil
+}
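
A brief sketch (not part of the vendored file) contrasting the decimal (powers of 1000) and binary (powers of 1024) helpers above; the input values are arbitrary:

```
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	fmt.Println(units.HumanSize(2746000)) // 2.746 MB  (decimal, base 1000)
	fmt.Println(units.BytesSize(2746000)) // 2.619 MiB (binary, base 1024)

	// Parsing goes the other way: binary units for RAM, decimal for generic sizes.
	ram, err := units.RAMInBytes("64m")
	if err != nil {
		panic(err)
	}
	fmt.Println(ram) // 67108864 (64 * 1024 * 1024)

	n, err := units.FromHumanSize("17MB")
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 17000000
}
```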
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/ulimit.go b/src/kube2msb/vendor/github.com/docker/go-units/ulimit.go
new file mode 100644
index 0000000..5ac7fd8
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/ulimit.go
@@ -0,0 +1,118 @@
+package units
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Ulimit is a human friendly version of Rlimit.
+type Ulimit struct {
+ Name string
+ Hard int64
+ Soft int64
+}
+
+// Rlimit specifies the resource limits, such as max open files.
+type Rlimit struct {
+ Type int `json:"type,omitempty"`
+ Hard uint64 `json:"hard,omitempty"`
+ Soft uint64 `json:"soft,omitempty"`
+}
+
+const (
+ // magic numbers for making the syscall
+ // some of these are defined in the syscall package, but not all.
+ // Also since Windows client doesn't get access to the syscall package, need to
+ // define these here
+ rlimitAs = 9
+ rlimitCore = 4
+ rlimitCPU = 0
+ rlimitData = 2
+ rlimitFsize = 1
+ rlimitLocks = 10
+ rlimitMemlock = 8
+ rlimitMsgqueue = 12
+ rlimitNice = 13
+ rlimitNofile = 7
+ rlimitNproc = 6
+ rlimitRss = 5
+ rlimitRtprio = 14
+ rlimitRttime = 15
+ rlimitSigpending = 11
+ rlimitStack = 3
+)
+
+var ulimitNameMapping = map[string]int{
+ //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
+ "core": rlimitCore,
+ "cpu": rlimitCPU,
+ "data": rlimitData,
+ "fsize": rlimitFsize,
+ "locks": rlimitLocks,
+ "memlock": rlimitMemlock,
+ "msgqueue": rlimitMsgqueue,
+ "nice": rlimitNice,
+ "nofile": rlimitNofile,
+ "nproc": rlimitNproc,
+ "rss": rlimitRss,
+ "rtprio": rlimitRtprio,
+ "rttime": rlimitRttime,
+ "sigpending": rlimitSigpending,
+ "stack": rlimitStack,
+}
+
+// ParseUlimit parses and returns a Ulimit from the specified string.
+func ParseUlimit(val string) (*Ulimit, error) {
+ parts := strings.SplitN(val, "=", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid ulimit argument: %s", val)
+ }
+
+ if _, exists := ulimitNameMapping[parts[0]]; !exists {
+ return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
+ }
+
+ var (
+ soft int64
+ hard = &soft // default to soft in case no hard was set
+ temp int64
+ err error
+ )
+ switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
+ case 2:
+ temp, err = strconv.ParseInt(limitVals[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ hard = &temp
+ fallthrough
+ case 1:
+ soft, err = strconv.ParseInt(limitVals[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
+ }
+
+ if soft > *hard {
+ return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
+ }
+
+ return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
+}
+
+// GetRlimit returns the RLimit corresponding to Ulimit.
+func (u *Ulimit) GetRlimit() (*Rlimit, error) {
+ t, exists := ulimitNameMapping[u.Name]
+ if !exists {
+ return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
+ }
+
+ return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
+}
+
+func (u *Ulimit) String() string {
+ return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
+}
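
A brief sketch (not part of the vendored file) of parsing a soft:hard ulimit string and mapping it to an Rlimit; the limit values are arbitrary:

```
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	ul, err := units.ParseUlimit("nofile=1024:4096")
	if err != nil {
		panic(err)
	}
	fmt.Println(ul.String()) // nofile=1024:4096

	rl, err := ul.GetRlimit()
	if err != nil {
		panic(err)
	}
	// rlimitNofile is 7 in the mapping table above.
	fmt.Printf("type=%d soft=%d hard=%d\n", rl.Type, rl.Soft, rl.Hard) // type=7 soft=1024 hard=4096
}
```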
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/CHANGES.md b/src/kube2msb/vendor/github.com/emicklei/go-restful/CHANGES.md
new file mode 100644
index 0000000..070bca7
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/CHANGES.md
@@ -0,0 +1,163 @@
+Change history of go-restful
+=
+2016-02-14
+- take the quality factor of the Accept header media type into account when deciding the content type of the response
+- add constructors for custom entity accessors for xml and json
+
+2015-09-27
+- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
+
+2015-09-25
+- fixed problem with changing Header after WriteHeader (issue 235)
+
+2015-09-14
+- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
+- added support for custom EntityReaderWriters.
+
+2015-08-06
+- add support for reading entities from compressed request content
+- use sync.Pool for compressors of http response and request body
+- add Description to Parameter for documentation in Swagger UI
+
+2015-03-20
+- add configurable logging
+
+2015-03-18
+- if not specified, the Operation is derived from the Route function
+
+2015-03-17
+- expose Parameter creation functions
+- make trace logger an interface
+- fix OPTIONSFilter
+- customize rendering of ServiceError
+- JSR311 router now handles wildcards
+- add Notes to Route
+
+2014-11-27
+- (api add) PrettyPrint per response. (as proposed in #167)
+
+2014-11-12
+- (api add) ApiVersion(.) for documentation in Swagger UI
+
+2014-11-10
+- (api change) struct fields tagged with "description" show up in Swagger UI
+
+2014-10-31
+- (api change) ReturnsError -> Returns
+- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
+- fix swagger nested structs
+- sort Swagger response messages by code
+
+2014-10-23
+- (api add) ReturnsError allows you to document Http codes in swagger
+- fixed problem with greedy CurlyRouter
+- (api add) Access-Control-Max-Age in CORS
+- add tracing functionality (injectable) for debugging purposes
+- support JSON parse 64bit int
+- fix empty parameters for swagger
+- WebServicesUrl is now optional for swagger
+- fixed duplicate AccessControlAllowOrigin in CORS
+- (api change) expose ServeMux in container
+- (api add) added AllowedDomains in CORS
+- (api add) ParameterNamed for detailed documentation
+
+2014-04-16
+- (api add) expose constructor of Request for testing.
+
+2014-06-27
+- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
+- (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons).
+
+2014-07-03
+- (api add) CORS can be configured with a list of allowed domains
+
+2014-03-12
+- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
+
+2014-02-26
+- (api add) Request now provides information about the matched Route, see method SelectedRoutePath
+
+2014-02-17
+- (api change) renamed parameter constants (go-lint checks)
+
+2014-01-10
+ - (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
+
+2014-01-07
+ - (api change) Write* methods in Response now return the error or nil.
+ - added example of serving HTML from a Go template.
+ - fixed comparing Allowed headers in CORS (is now case-insensitive)
+
+2013-11-13
+ - (api add) Response knows how many bytes are written to the response body.
+
+2013-10-29
+ - (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes source code information.
+
+2013-10-04
+ - (api add) Response knows what HTTP status has been written
+ - (api add) Request can have attributes (map of string->interface, also called request-scoped variables)
+
+2013-09-12
+ - (api change) Router interface simplified
+ - Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
+
+2013-08-05
+ - add OPTIONS support
+ - add CORS support
+
+2013-08-27
+ - fixed some reported issues (see github)
+ - (api change) deprecated use of WriteError; use WriteErrorString instead
+
+2014-04-15
+ - (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
+
+2013-08-08
+ - (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
+ - (api add) the swagger package has been extended to have a UI per container.
+ - if panic is detected then a small stack trace is printed (thanks to runner-mei)
+ - (api add) WriteErrorString to Response
+
+Important API changes:
+
 + - (api remove) package variable DoNotRecover no longer works; use restful.DefaultContainer.DoNotRecover(true) instead.
 + - (api remove) package variable EnableContentEncoding no longer works; use restful.DefaultContainer.EnableContentEncoding(true) instead.
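+
+  For illustration, a minimal migration sketch using the container methods named above:
+
+    restful.DefaultContainer.DoNotRecover(true)
+    restful.DefaultContainer.EnableContentEncoding(true)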
+
+
+2013-07-06
+
 + - (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled by default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
+
+2013-06-19
+
+ - (improve) DoNotRecover option, moved request body closer, improved ReadEntity
+
+2013-06-03
+
+ - (api change) removed Dispatcher interface, hide PathExpression
+ - changed receiver names of type functions to be more idiomatic Go
+
+2013-06-02
+
+ - (optimize) Cache the RegExp compilation of Paths.
+
+2013-05-22
+
+ - (api add) Added support for request/response filter functions
+
+2013-05-18
+
+
+ - (api add) Added feature to change the default Http Request Dispatch function (travis cline)
+ - (api change) Moved Swagger Webservice to swagger package (see example restful-user)
+
+[2012-11-14 .. 2013-05-18>
+
+ - See https://github.com/emicklei/go-restful/commits
+
+2012-11-14
+
+ - Initial commit
+
+
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/LICENSE b/src/kube2msb/vendor/github.com/emicklei/go-restful/LICENSE
new file mode 100644
index 0000000..ece7ec6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2012,2013 Ernest Micklei
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/README.md b/src/kube2msb/vendor/github.com/emicklei/go-restful/README.md
new file mode 100644
index 0000000..e5492e4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/README.md
@@ -0,0 +1,74 @@
+go-restful
+==========
+
+package for building REST-style Web Services using Google Go
+
+REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
+
+- GET = Retrieve a representation of a resource
+- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
+- PUT = Create if you are sending the full content of the specified resource (URI).
+- PUT = Update if you are updating the full content of the specified resource.
+- DELETE = Delete if you are requesting the server to delete the resource
+- PATCH = Update partial content of a resource
+- OPTIONS = Get information about the communication options for the request URI
+
+### Example
+
+```Go
+ws := new(restful.WebService)
+ws.
+ Path("/users").
+ Consumes(restful.MIME_XML, restful.MIME_JSON).
+ Produces(restful.MIME_JSON, restful.MIME_XML)
+
+ws.Route(ws.GET("/{user-id}").To(u.findUser).
+ Doc("get a user").
+ Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
+ Writes(User{}))
+...
+
+func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
+ id := request.PathParameter("user-id")
+ ...
+}
+```
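+
+A minimal, hedged sketch of wiring this up and serving it. The `UserResource` type comes from the example above, and the `WebService()` helper that returns the configured `ws` is a hypothetical name; `restful.Add` registers the service on the DefaultContainer, which serves via `http.DefaultServeMux`:
+
+```Go
+package main
+
+import (
+	"log"
+	"net/http"
+
+	"github.com/emicklei/go-restful"
+)
+
+func main() {
+	u := UserResource{}         // resource type from the example above
+	restful.Add(u.WebService()) // hypothetical helper returning the configured *restful.WebService
+	log.Fatal(http.ListenAndServe(":8080", nil))
+}
+```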
+
+[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go)
+
+### Features
+
+- Routes for request &#8594; function mapping with path parameter (e.g. {id}) support
+- Configurable router (a sketch of switching routers follows this list):
+ - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions (See RouterJSR311 which is used by default)
+ - Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}, See CurlyRouter)
+- Request API for reading structs from JSON/XML and accessing parameters (path,query,header)
+- Response API for writing structs to JSON/XML and setting headers
+- Filters for intercepting the request &#8594; response flow on Service or Route level
+- Request-scoped variables using attributes
+- Containers for WebServices on different HTTP endpoints
+- Content encoding (gzip,deflate) of request and response payloads
+- Automatic responses on OPTIONS (using a filter)
+- Automatic CORS request handling (using a filter)
+- API declaration for Swagger UI (see swagger package)
+- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
+- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
+- Configurable (trace) logging
+- Customizable encoding using EntityReaderWriter registration
+- Customizable gzip/deflate readers and writers using CompressorProvider registration
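+
+A minimal sketch of the router switch referenced in the list above (the same call is shown in the package documentation):
+
+```Go
+restful.DefaultContainer.Router(restful.CurlyRouter{})
+```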
+
+### Resources
+
+- [Documentation on godoc.org](http://godoc.org/github.com/emicklei/go-restful)
+- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
+- [Example posted on blog](http://ernestmicklei.com/2012/11/24/go-restful-first-working-example/)
+- [Design explained on blog](http://ernestmicklei.com/2012/11/11/go-restful-api-design/)
+- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
+- [gopkg.in](https://gopkg.in/emicklei/go-restful.v1)
+- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
+
+[![Build Status](https://drone.io/github.com/emicklei/go-restful/status.png)](https://drone.io/github.com/emicklei/go-restful/latest)
+
+(c) 2012 - 2015, http://ernestmicklei.com. MIT License
+
+Type ```git shortlog -s``` for a full list of contributors.
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/Srcfile b/src/kube2msb/vendor/github.com/emicklei/go-restful/Srcfile
new file mode 100644
index 0000000..16fd186
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/Srcfile
@@ -0,0 +1 @@
+{"SkipDirs": ["examples"]}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/bench_test.sh b/src/kube2msb/vendor/github.com/emicklei/go-restful/bench_test.sh
new file mode 100644
index 0000000..47ffbe4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/bench_test.sh
@@ -0,0 +1,10 @@
+#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out
+
+go test -c
+./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
+./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly
+
+#go tool pprof go-restful.test tmp.prof
+go tool pprof go-restful.test curly.prof
+
+
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/compress.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/compress.go
new file mode 100644
index 0000000..220b377
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/compress.go
@@ -0,0 +1,123 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bufio"
+ "compress/gzip"
+ "compress/zlib"
+ "errors"
+ "io"
+ "net"
+ "net/http"
+ "strings"
+)
+
+// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
+var EnableContentEncoding = false
+
+// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
+type CompressingResponseWriter struct {
+ writer http.ResponseWriter
+ compressor io.WriteCloser
+ encoding string
+}
+
+// Header is part of http.ResponseWriter interface
+func (c *CompressingResponseWriter) Header() http.Header {
+ return c.writer.Header()
+}
+
+// WriteHeader is part of http.ResponseWriter interface
+func (c *CompressingResponseWriter) WriteHeader(status int) {
+ c.writer.WriteHeader(status)
+}
+
+// Write is part of http.ResponseWriter interface
+// It is passed through the compressor
+func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
+ if c.isCompressorClosed() {
+ return -1, errors.New("Compressing error: tried to write data using closed compressor")
+ }
+ return c.compressor.Write(bytes)
+}
+
+// CloseNotify is part of http.CloseNotifier interface
+func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
+ return c.writer.(http.CloseNotifier).CloseNotify()
+}
+
+// Close the underlying compressor
+func (c *CompressingResponseWriter) Close() error {
+ if c.isCompressorClosed() {
+ return errors.New("Compressing error: tried to close already closed compressor")
+ }
+
+ c.compressor.Close()
+ if ENCODING_GZIP == c.encoding {
+ currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
+ }
+ if ENCODING_DEFLATE == c.encoding {
+ currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
+ }
+ // gc hint needed?
+ c.compressor = nil
+ return nil
+}
+
+func (c *CompressingResponseWriter) isCompressorClosed() bool {
+ return nil == c.compressor
+}
+
+// Hijack implements the http.Hijacker interface.
+// This is especially useful when combining Container.EnableContentEncoding
+// with websockets (for instance gorilla/websocket).
+func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ hijacker, ok := c.writer.(http.Hijacker)
+ if !ok {
+ return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
+ }
+ return hijacker.Hijack()
+}
+
+// wantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
+func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
+ header := httpRequest.Header.Get(HEADER_AcceptEncoding)
+ gi := strings.Index(header, ENCODING_GZIP)
+ zi := strings.Index(header, ENCODING_DEFLATE)
+ // use in order of appearance
+ if gi == -1 {
+ return zi != -1, ENCODING_DEFLATE
+ } else if zi == -1 {
+ return gi != -1, ENCODING_GZIP
+ } else {
+ if gi < zi {
+ return true, ENCODING_GZIP
+ }
+ return true, ENCODING_DEFLATE
+ }
+}
+
+// NewCompressingResponseWriter creates a CompressingResponseWriter for a known encoding = {gzip,deflate}
+func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
+ httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
+ c := new(CompressingResponseWriter)
+ c.writer = httpWriter
+ var err error
+ if ENCODING_GZIP == encoding {
+ w := currentCompressorProvider.AcquireGzipWriter()
+ w.Reset(httpWriter)
+ c.compressor = w
+ c.encoding = ENCODING_GZIP
+ } else if ENCODING_DEFLATE == encoding {
+ w := currentCompressorProvider.AcquireZlibWriter()
+ w.Reset(httpWriter)
+ c.compressor = w
+ c.encoding = ENCODING_DEFLATE
+ } else {
+ return nil, errors.New("Unknown encoding:" + encoding)
+ }
+ return c, err
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_cache.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_cache.go
new file mode 100644
index 0000000..ee42601
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_cache.go
@@ -0,0 +1,103 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "compress/gzip"
+ "compress/zlib"
+)
+
+// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
+// of writers and readers (resources).
+// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
+type BoundedCachedCompressors struct {
+ gzipWriters chan *gzip.Writer
+ gzipReaders chan *gzip.Reader
+ zlibWriters chan *zlib.Writer
+ writersCapacity int
+ readersCapacity int
+}
+
+// NewBoundedCachedCompressors returns a new BoundedCachedCompressors with a filled cache.
+func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
+ b := &BoundedCachedCompressors{
+ gzipWriters: make(chan *gzip.Writer, writersCapacity),
+ gzipReaders: make(chan *gzip.Reader, readersCapacity),
+ zlibWriters: make(chan *zlib.Writer, writersCapacity),
+ writersCapacity: writersCapacity,
+ readersCapacity: readersCapacity,
+ }
+ for ix := 0; ix < writersCapacity; ix++ {
+ b.gzipWriters <- newGzipWriter()
+ b.zlibWriters <- newZlibWriter()
+ }
+ for ix := 0; ix < readersCapacity; ix++ {
+ b.gzipReaders <- newGzipReader()
+ }
+ return b
+}
+
+// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
+ var writer *gzip.Writer
+ select {
+ case writer, _ = <-b.gzipWriters:
+ default:
+ // return a new unmanaged one
+ writer = newGzipWriter()
+ }
+ return writer
+}
+
+// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
+ // forget the unmanaged ones
+ if len(b.gzipWriters) < b.writersCapacity {
+ b.gzipWriters <- w
+ }
+}
+
+// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
+ var reader *gzip.Reader
+ select {
+ case reader, _ = <-b.gzipReaders:
+ default:
+ // return a new unmanaged one
+ reader = newGzipReader()
+ }
+ return reader
+}
+
+// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
+ // forget the unmanaged ones
+ if len(b.gzipReaders) < b.readersCapacity {
+ b.gzipReaders <- r
+ }
+}
+
+// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
+ var writer *zlib.Writer
+ select {
+ case writer, _ = <-b.zlibWriters:
+ default:
+ // return a new unmanaged one
+ writer = newZlibWriter()
+ }
+ return writer
+}
+
+// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
+ // forget the unmanaged ones
+ if len(b.zlibWriters) < b.writersCapacity {
+ b.zlibWriters <- w
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_pools.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_pools.go
new file mode 100644
index 0000000..d866ce6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_pools.go
@@ -0,0 +1,91 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "compress/gzip"
+ "compress/zlib"
+ "sync"
+)
+
+// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
+type SyncPoolCompessors struct {
+ GzipWriterPool *sync.Pool
+ GzipReaderPool *sync.Pool
+ ZlibWriterPool *sync.Pool
+}
+
+// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
+func NewSyncPoolCompessors() *SyncPoolCompessors {
+ return &SyncPoolCompessors{
+ GzipWriterPool: &sync.Pool{
+ New: func() interface{} { return newGzipWriter() },
+ },
+ GzipReaderPool: &sync.Pool{
+ New: func() interface{} { return newGzipReader() },
+ },
+ ZlibWriterPool: &sync.Pool{
+ New: func() interface{} { return newZlibWriter() },
+ },
+ }
+}
+
+func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
+ return s.GzipWriterPool.Get().(*gzip.Writer)
+}
+
+func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
+ s.GzipWriterPool.Put(w)
+}
+
+func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
+ return s.GzipReaderPool.Get().(*gzip.Reader)
+}
+
+func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
+ s.GzipReaderPool.Put(r)
+}
+
+func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
+ return s.ZlibWriterPool.Get().(*zlib.Writer)
+}
+
+func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
+ s.ZlibWriterPool.Put(w)
+}
+
+func newGzipWriter() *gzip.Writer {
+ // create with an empty bytes writer; it will be replaced before using the gzipWriter
+ writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
+ if err != nil {
+ panic(err.Error())
+ }
+ return writer
+}
+
+func newGzipReader() *gzip.Reader {
+	// create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
+	// we can safely use currentCompressorProvider because it is set on package initialization.
+ w := currentCompressorProvider.AcquireGzipWriter()
+ defer currentCompressorProvider.ReleaseGzipWriter(w)
+ b := new(bytes.Buffer)
+ w.Reset(b)
+ w.Flush()
+ w.Close()
+ reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
+ if err != nil {
+ panic(err.Error())
+ }
+ return reader
+}
+
+func newZlibWriter() *zlib.Writer {
+ writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
+ if err != nil {
+ panic(err.Error())
+ }
+ return writer
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/compressors.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/compressors.go
new file mode 100644
index 0000000..f028456
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/compressors.go
@@ -0,0 +1,53 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "compress/gzip"
+ "compress/zlib"
+)
+
+type CompressorProvider interface {
+ // Returns a *gzip.Writer which needs to be released later.
+ // Before using it, call Reset().
+ AcquireGzipWriter() *gzip.Writer
+
+	// Releases an acquired *gzip.Writer.
+ ReleaseGzipWriter(w *gzip.Writer)
+
+ // Returns a *gzip.Reader which needs to be released later.
+ AcquireGzipReader() *gzip.Reader
+
+	// Releases an acquired *gzip.Reader.
+ ReleaseGzipReader(w *gzip.Reader)
+
+ // Returns a *zlib.Writer which needs to be released later.
+ // Before using it, call Reset().
+ AcquireZlibWriter() *zlib.Writer
+
+	// Releases an acquired *zlib.Writer.
+ ReleaseZlibWriter(w *zlib.Writer)
+}
+
+// currentCompressorProvider is the actual provider of compressors (zlib or gzip).
+var currentCompressorProvider CompressorProvider
+
+func init() {
+ currentCompressorProvider = NewSyncPoolCompessors()
+}
+
+// CurrentCompressorProvider returns the current CompressorProvider.
+// It is initialized using a SyncPoolCompessors.
+func CurrentCompressorProvider() CompressorProvider {
+ return currentCompressorProvider
+}
+
+// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
+func SetCompressorProvider(p CompressorProvider) {
+ if p == nil {
+ panic("cannot set compressor provider to nil")
+ }
+ currentCompressorProvider = p
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/constants.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/constants.go
new file mode 100644
index 0000000..203439c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/constants.go
@@ -0,0 +1,30 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+const (
+ MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
+ MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
+ MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
+
+ HEADER_Allow = "Allow"
+ HEADER_Accept = "Accept"
+ HEADER_Origin = "Origin"
+ HEADER_ContentType = "Content-Type"
+ HEADER_LastModified = "Last-Modified"
+ HEADER_AcceptEncoding = "Accept-Encoding"
+ HEADER_ContentEncoding = "Content-Encoding"
+ HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers"
+ HEADER_AccessControlRequestMethod = "Access-Control-Request-Method"
+ HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers"
+ HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods"
+ HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin"
+ HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
+ HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers"
+ HEADER_AccessControlMaxAge = "Access-Control-Max-Age"
+
+ ENCODING_GZIP = "gzip"
+ ENCODING_DEFLATE = "deflate"
+)
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/container.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/container.go
new file mode 100644
index 0000000..62ded27
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/container.go
@@ -0,0 +1,361 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
+// The requests are further dispatched to routes of WebServices using a RouteSelector
+type Container struct {
+ webServicesLock sync.RWMutex
+ webServices []*WebService
+ ServeMux *http.ServeMux
+ isRegisteredOnRoot bool
+ containerFilters []FilterFunction
+ doNotRecover bool // default is false
+ recoverHandleFunc RecoverHandleFunction
+ serviceErrorHandleFunc ServiceErrorHandleFunction
+ router RouteSelector // default is a RouterJSR311, CurlyRouter is the faster alternative
+ contentEncodingEnabled bool // default is false
+}
+
+// NewContainer creates a new Container using a new ServeMux and default router (RouterJSR311)
+func NewContainer() *Container {
+ return &Container{
+ webServices: []*WebService{},
+ ServeMux: http.NewServeMux(),
+ isRegisteredOnRoot: false,
+ containerFilters: []FilterFunction{},
+ doNotRecover: false,
+ recoverHandleFunc: logStackOnRecover,
+ serviceErrorHandleFunc: writeServiceError,
+ router: RouterJSR311{},
+ contentEncodingEnabled: false}
+}
+
+// RecoverHandleFunction declares functions that can be used to handle a panic situation.
+// The first argument is what recover() returns. The second must be used to communicate an error response.
+type RecoverHandleFunction func(interface{}, http.ResponseWriter)
+
+// RecoverHandler changes the default function (logStackOnRecover) to be called
+// when a panic is detected. DoNotRecover must have its default value (=false).
+func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
+ c.recoverHandleFunc = handler
+}
+
+// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
+// The first argument is the service error, the second is the request that resulted in the error and
+// the third must be used to communicate an error response.
+type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
+
+// ServiceErrorHandler changes the default function (writeServiceError) to be called
+// when a ServiceError is detected.
+func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
+ c.serviceErrorHandleFunc = handler
+}
+
+// DoNotRecover controls whether panics will be caught to return HTTP 500.
+// If set to true, Route functions are responsible for handling any error situation.
+// Default value is false = recover from panics. This has performance implications.
+func (c *Container) DoNotRecover(doNot bool) {
+ c.doNotRecover = doNot
+}
+
+// Router changes the default Router (currently RouterJSR311)
+func (c *Container) Router(aRouter RouteSelector) {
+ c.router = aRouter
+}
+
+// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
+func (c *Container) EnableContentEncoding(enabled bool) {
+ c.contentEncodingEnabled = enabled
+}
+
+// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
+func (c *Container) Add(service *WebService) *Container {
+ c.webServicesLock.Lock()
+ defer c.webServicesLock.Unlock()
+
+ // if rootPath was not set then lazy initialize it
+ if len(service.rootPath) == 0 {
+ service.Path("/")
+ }
+
+ // cannot have duplicate root paths
+ for _, each := range c.webServices {
+ if each.RootPath() == service.RootPath() {
+ log.Printf("[restful] WebService with duplicate root path detected:['%v']", each)
+ os.Exit(1)
+ }
+ }
+
+ // If not registered on root then add specific mapping
+ if !c.isRegisteredOnRoot {
+ c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
+ }
+ c.webServices = append(c.webServices, service)
+ return c
+}
+
+// addHandler may set a new HandleFunc for the serveMux
+// this function must run inside the critical region protected by the webServicesLock.
+// returns true if the function was registered on root ("/")
+func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
+ pattern := fixedPrefixPath(service.RootPath())
+ // check if root path registration is needed
+ if "/" == pattern || "" == pattern {
+ serveMux.HandleFunc("/", c.dispatch)
+ return true
+ }
+ // detect if registration already exists
+ alreadyMapped := false
+ for _, each := range c.webServices {
+ if each.RootPath() == service.RootPath() {
+ alreadyMapped = true
+ break
+ }
+ }
+ if !alreadyMapped {
+ serveMux.HandleFunc(pattern, c.dispatch)
+ if !strings.HasSuffix(pattern, "/") {
+ serveMux.HandleFunc(pattern+"/", c.dispatch)
+ }
+ }
+ return false
+}
+
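+// Remove removes a WebService from the Container. It rebuilds the internal
+// ServeMux from the remaining WebServices; this is not supported when the
+// Container uses the http.DefaultServeMux.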
+func (c *Container) Remove(ws *WebService) error {
+ if c.ServeMux == http.DefaultServeMux {
+ errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
+ log.Printf(errMsg)
+ return errors.New(errMsg)
+ }
+ c.webServicesLock.Lock()
+ defer c.webServicesLock.Unlock()
+ // build a new ServeMux and re-register all WebServices
+ newServeMux := http.NewServeMux()
+ newServices := []*WebService{}
+ newIsRegisteredOnRoot := false
+ for _, each := range c.webServices {
+ if each.rootPath != ws.rootPath {
+ // If not registered on root then add specific mapping
+ if !newIsRegisteredOnRoot {
+ newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
+ }
+ newServices = append(newServices, each)
+ }
+ }
+ c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
+ return nil
+}
+
+// logStackOnRecover is the default RecoverHandleFunction and is called
+// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
+// Default implementation logs the stacktrace and writes the stacktrace on the response.
+// This may be a security issue as it exposes source code information.
+func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
+ var buffer bytes.Buffer
+ buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason))
+ for i := 2; ; i += 1 {
+ _, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line))
+ }
+ log.Print(buffer.String())
+ httpWriter.WriteHeader(http.StatusInternalServerError)
+ httpWriter.Write(buffer.Bytes())
+}
+
+// writeServiceError is the default ServiceErrorHandleFunction and is called
+// when a ServiceError is returned during route selection. Default implementation
+// calls resp.WriteErrorString(err.Code, err.Message)
+func writeServiceError(err ServiceError, req *Request, resp *Response) {
+ resp.WriteErrorString(err.Code, err.Message)
+}
+
+// Dispatch the incoming Http Request to a matching WebService.
+func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
+ writer := httpWriter
+
+ // CompressingResponseWriter should be closed after all operations are done
+ defer func() {
+ if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
+ compressWriter.Close()
+ }
+ }()
+
+	// Install panic recovery unless told otherwise
+ if !c.doNotRecover { // catch all for 500 response
+ defer func() {
+ if r := recover(); r != nil {
+ c.recoverHandleFunc(r, writer)
+ return
+ }
+ }()
+ }
+ // Install closing the request body (if any)
+ defer func() {
+ if nil != httpRequest.Body {
+ httpRequest.Body.Close()
+ }
+ }()
+
+ // Detect if compression is needed
+ // assume without compression, test for override
+ if c.contentEncodingEnabled {
+ doCompress, encoding := wantsCompressedResponse(httpRequest)
+ if doCompress {
+ var err error
+ writer, err = NewCompressingResponseWriter(httpWriter, encoding)
+ if err != nil {
+ log.Print("[restful] unable to install compressor: ", err)
+ httpWriter.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ }
+ }
+	// Find best match Route; err is non-nil if no match was found
+ var webService *WebService
+ var route *Route
+ var err error
+ func() {
+ c.webServicesLock.RLock()
+ defer c.webServicesLock.RUnlock()
+ webService, route, err = c.router.SelectRoute(
+ c.webServices,
+ httpRequest)
+ }()
+ if err != nil {
+ // a non-200 response has already been written
+ // run container filters anyway ; they should not touch the response...
+ chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
+ switch err.(type) {
+ case ServiceError:
+ ser := err.(ServiceError)
+ c.serviceErrorHandleFunc(ser, req, resp)
+ }
+ // TODO
+ }}
+ chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
+ return
+ }
+ wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest)
+ // pass through filters (if any)
+ if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
+ // compose filter chain
+ allFilters := []FilterFunction{}
+ allFilters = append(allFilters, c.containerFilters...)
+ allFilters = append(allFilters, webService.filters...)
+ allFilters = append(allFilters, route.Filters...)
+ chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
+ // handle request by route after passing all filters
+ route.Function(wrappedRequest, wrappedResponse)
+ }}
+ chain.ProcessFilter(wrappedRequest, wrappedResponse)
+ } else {
+ // no filters, handle request by route
+ route.Function(wrappedRequest, wrappedResponse)
+ }
+}
+
+// fixedPrefixPath returns the fixed part of the pathspec; it may include template vars {}
+func fixedPrefixPath(pathspec string) string {
+ varBegin := strings.Index(pathspec, "{")
+ if -1 == varBegin {
+ return pathspec
+ }
+ return pathspec[:varBegin]
+}
+
+// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
+func (c Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
+ c.ServeMux.ServeHTTP(httpwriter, httpRequest)
+}
+
+// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
+func (c Container) Handle(pattern string, handler http.Handler) {
+ c.ServeMux.Handle(pattern, handler)
+}
+
+// HandleWithFilter registers the handler for the given pattern.
+// Container's filter chain is applied for handler.
+// If a handler already exists for pattern, HandleWithFilter panics.
+func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
+ f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
+ if len(c.containerFilters) == 0 {
+ handler.ServeHTTP(httpResponse, httpRequest)
+ return
+ }
+
+ chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
+ handler.ServeHTTP(httpResponse, httpRequest)
+ }}
+ chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
+ }
+
+ c.Handle(pattern, http.HandlerFunc(f))
+}
+
+// Filter appends a container FilterFunction. These are called before dispatching
+// a http.Request to a WebService from the container
+func (c *Container) Filter(filter FilterFunction) {
+ c.containerFilters = append(c.containerFilters, filter)
+}
+
+// RegisteredWebServices returns the collections of added WebServices
+func (c Container) RegisteredWebServices() []*WebService {
+ c.webServicesLock.RLock()
+ defer c.webServicesLock.RUnlock()
+ result := make([]*WebService, len(c.webServices))
+ for ix := range c.webServices {
+ result[ix] = c.webServices[ix]
+ }
+ return result
+}
+
+// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
+func (c Container) computeAllowedMethods(req *Request) []string {
+ // Go through all RegisteredWebServices() and all its Routes to collect the options
+ methods := []string{}
+ requestPath := req.Request.URL.Path
+ for _, ws := range c.RegisteredWebServices() {
+ matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
+ if matches != nil {
+ finalMatch := matches[len(matches)-1]
+ for _, rt := range ws.Routes() {
+ matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
+ if matches != nil {
+ lastMatch := matches[len(matches)-1]
+					if lastMatch == "" || lastMatch == "/" { // include the method only when the remaining match is empty or "/"
+ methods = append(methods, rt.Method)
+ }
+ }
+ }
+ }
+ }
+ // methods = append(methods, "OPTIONS") not sure about this
+ return methods
+}
+
+// newBasicRequestResponse creates a pair of Request,Response from its http versions.
+// It is basic because no parameter or (produces) content-type information is given.
+func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
+ resp := NewResponse(httpWriter)
+ resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
+ return NewRequest(httpRequest), resp
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/cors_filter.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/cors_filter.go
new file mode 100644
index 0000000..1efeef0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/cors_filter.go
@@ -0,0 +1,202 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
+// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
+// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
+//
+// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
+// http://enable-cors.org/server.html
+// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
+type CrossOriginResourceSharing struct {
+ ExposeHeaders []string // list of Header names
+ AllowedHeaders []string // list of Header names
+ AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
+ AllowedMethods []string
+ MaxAge int // number of seconds before requiring new Options request
+ CookiesAllowed bool
+ Container *Container
+
+ allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
+}
+
+// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
+// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
+func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
+ origin := req.Request.Header.Get(HEADER_Origin)
+ if len(origin) == 0 {
+ if trace {
+ traceLogger.Print("no Http header Origin set")
+ }
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ if !c.isOriginAllowed(origin) { // check whether this origin is allowed
+ if trace {
+ traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
+ }
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ if req.Request.Method != "OPTIONS" {
+ c.doActualRequest(req, resp)
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
+ c.doPreflightRequest(req, resp)
+ } else {
+ c.doActualRequest(req, resp)
+ chain.ProcessFilter(req, resp)
+ return
+ }
+}
+
+func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
+ c.setOptionsHeaders(req, resp)
+ // continue processing the response
+}
+
+func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
+ if len(c.AllowedMethods) == 0 {
+ if c.Container == nil {
+ c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
+ } else {
+ c.AllowedMethods = c.Container.computeAllowedMethods(req)
+ }
+ }
+
+ acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
+ if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
+ if trace {
+ traceLogger.Printf("Http header %s:%s is not in %v",
+ HEADER_AccessControlRequestMethod,
+ acrm,
+ c.AllowedMethods)
+ }
+ return
+ }
+ acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
+ if len(acrhs) > 0 {
+ for _, each := range strings.Split(acrhs, ",") {
+ if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
+ if trace {
+ traceLogger.Printf("Http header %s:%s is not in %v",
+ HEADER_AccessControlRequestHeaders,
+ acrhs,
+ c.AllowedHeaders)
+ }
+ return
+ }
+ }
+ }
+ resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
+ resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
+ c.setOptionsHeaders(req, resp)
+
+ // return http 200 response, no body
+}
+
+func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
+ c.checkAndSetExposeHeaders(resp)
+ c.setAllowOriginHeader(req, resp)
+ c.checkAndSetAllowCredentials(resp)
+ if c.MaxAge > 0 {
+ resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
+ }
+}
+
+func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
+ if len(origin) == 0 {
+ return false
+ }
+ if len(c.AllowedDomains) == 0 {
+ return true
+ }
+
+ allowed := false
+ for _, domain := range c.AllowedDomains {
+ if domain == origin {
+ allowed = true
+ break
+ }
+ }
+
+ if !allowed {
+ if len(c.allowedOriginPatterns) == 0 {
+ // compile allowed domains to allowed origin patterns
+ allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
+ if err != nil {
+ return false
+ }
+ c.allowedOriginPatterns = allowedOriginRegexps
+ }
+
+ for _, pattern := range c.allowedOriginPatterns {
+ if allowed = pattern.MatchString(origin); allowed {
+ break
+ }
+ }
+ }
+
+ return allowed
+}
+
+func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
+ origin := req.Request.Header.Get(HEADER_Origin)
+ if c.isOriginAllowed(origin) {
+ resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
+ }
+}
+
+func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
+ if len(c.ExposeHeaders) > 0 {
+ resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
+ }
+}
+
+func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
+ if c.CookiesAllowed {
+ resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
+ }
+}
+
+func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
+ for _, each := range allowedMethods {
+ if each == method {
+ return true
+ }
+ }
+ return false
+}
+
+func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
+ for _, each := range c.AllowedHeaders {
+ if strings.ToLower(each) == strings.ToLower(header) {
+ return true
+ }
+ }
+ return false
+}
+
+// Take a list of strings and compile them into a list of regular expressions.
+func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
+ regexps := []*regexp.Regexp{}
+ for _, regexpStr := range regexpStrings {
+ r, err := regexp.Compile(regexpStr)
+ if err != nil {
+ return regexps, err
+ }
+ regexps = append(regexps, r)
+ }
+ return regexps, nil
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/coverage.sh b/src/kube2msb/vendor/github.com/emicklei/go-restful/coverage.sh
new file mode 100644
index 0000000..e27dbf1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/coverage.sh
@@ -0,0 +1,2 @@
+go test -coverprofile=coverage.out
+go tool cover -html=coverage.out \ No newline at end of file
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/curly.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/curly.go
new file mode 100644
index 0000000..185300d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/curly.go
@@ -0,0 +1,162 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "net/http"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
+type CurlyRouter struct{}
+
+// SelectRoute is part of the Router interface and returns the best match
+// for the WebService and its Route for the given Request.
+func (c CurlyRouter) SelectRoute(
+ webServices []*WebService,
+ httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {
+
+ requestTokens := tokenizePath(httpRequest.URL.Path)
+
+ detectedService := c.detectWebService(requestTokens, webServices)
+ if detectedService == nil {
+ if trace {
+ traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
+ }
+ return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+ }
+ candidateRoutes := c.selectRoutes(detectedService, requestTokens)
+ if len(candidateRoutes) == 0 {
+ if trace {
+ traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
+ }
+ return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+ }
+ selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
+ if selectedRoute == nil {
+ return detectedService, nil, err
+ }
+ return detectedService, selectedRoute, nil
+}
+
+// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request.
+func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
+ candidates := sortableCurlyRoutes{}
+ for _, each := range ws.routes {
+ matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
+ if matches {
+ candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
+ }
+ }
+ sort.Sort(sort.Reverse(candidates))
+ return candidates
+}
+
+// matchesRouteByPathTokens computes whether the route matches, how many parameters match and how many static path elements there are.
+func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
+ if len(routeTokens) < len(requestTokens) {
+ // proceed in matching only if last routeToken is wildcard
+ count := len(routeTokens)
+ if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
+ return false, 0, 0
+ }
+ // proceed
+ }
+ for i, routeToken := range routeTokens {
+ if i == len(requestTokens) {
+ // reached end of request path
+ return false, 0, 0
+ }
+ requestToken := requestTokens[i]
+ if strings.HasPrefix(routeToken, "{") {
+ paramCount++
+ if colon := strings.Index(routeToken, ":"); colon != -1 {
+ // match by regex
+ matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
+ if !matchesToken {
+ return false, 0, 0
+ }
+ if matchesRemainder {
+ break
+ }
+ }
+ } else { // no { prefix
+ if requestToken != routeToken {
+ return false, 0, 0
+ }
+ staticCount++
+ }
+ }
+ return true, paramCount, staticCount
+}
+
+// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
+// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
+func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
+ regPart := routeToken[colon+1 : len(routeToken)-1]
+ if regPart == "*" {
+ if trace {
+ traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
+ }
+ return true, true
+ }
+ matched, err := regexp.MatchString(regPart, requestToken)
+ return (matched && err == nil), false
+}
+
+// detectRoute selects from a list of Route the first match by inspecting both the Accept and Content-Type
+// headers of the Request. See also RouterJSR311 in jsr311.go
+func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
+ // tracing is done inside detectRoute
+ return RouterJSR311{}.detectRoute(candidateRoutes.routes(), httpRequest)
+}
+
+// detectWebService returns the best matching webService given the list of path tokens.
+// see also computeWebserviceScore
+func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
+ var best *WebService
+ score := -1
+ for _, each := range webServices {
+ matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
+ if matches && (eachScore > score) {
+ best = each
+ score = eachScore
+ }
+ }
+ return best
+}
+
+// computeWebserviceScore returns whether tokens match and
+// the weighted score of the longest matching consecutive tokens from the beginning.
+func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
+ if len(tokens) > len(requestTokens) {
+ return false, 0
+ }
+ score := 0
+ for i := 0; i < len(tokens); i++ {
+ each := requestTokens[i]
+ other := tokens[i]
+ if len(each) == 0 && len(other) == 0 {
+ score++
+ continue
+ }
+ if len(other) > 0 && strings.HasPrefix(other, "{") {
+ // no empty match
+ if len(each) == 0 {
+ return false, score
+ }
+ score += 1
+ } else {
+ // not a parameter
+ if each != other {
+ return false, score
+ }
+ score += (len(tokens) - i) * 10 //fuzzy
+ }
+ }
+ return true, score
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/curly_route.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/curly_route.go
new file mode 100644
index 0000000..296f946
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/curly_route.go
@@ -0,0 +1,52 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
+type curlyRoute struct {
+ route Route
+ paramCount int
+ staticCount int
+}
+
+type sortableCurlyRoutes []curlyRoute
+
+func (s *sortableCurlyRoutes) add(route curlyRoute) {
+ *s = append(*s, route)
+}
+
+func (s sortableCurlyRoutes) routes() (routes []Route) {
+ for _, each := range s {
+ routes = append(routes, each.route) // TODO change return type
+ }
+ return routes
+}
+
+func (s sortableCurlyRoutes) Len() int {
+ return len(s)
+}
+func (s sortableCurlyRoutes) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s sortableCurlyRoutes) Less(i, j int) bool {
+ ci := s[i]
+ cj := s[j]
+
+ // primary key
+ if ci.staticCount < cj.staticCount {
+ return true
+ }
+ if ci.staticCount > cj.staticCount {
+ return false
+ }
+	// secondary key
+ if ci.paramCount < cj.paramCount {
+ return true
+ }
+ if ci.paramCount > cj.paramCount {
+ return false
+ }
+ return ci.route.Path < cj.route.Path
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/doc.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/doc.go
new file mode 100644
index 0000000..d40405b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/doc.go
@@ -0,0 +1,196 @@
+/*
+Package restful, a lean package for creating REST-style WebServices without magic.
+
+WebServices and Routes
+
+A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
+Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
+WebServices must be added to a container (see below) in order to handle Http requests from a server.
+
+A Route is defined by an HTTP method, a URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
+This package has the logic to find the best matching Route and if found, call its Function.
+
+ ws := new(restful.WebService)
+ ws.
+ Path("/users").
+ Consumes(restful.MIME_JSON, restful.MIME_XML).
+ Produces(restful.MIME_JSON, restful.MIME_XML)
+
+ ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource
+
+ ...
+
+ // GET http://localhost:8080/users/1
+ func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
+ id := request.PathParameter("user-id")
+ ...
+ }
+
+The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
+
+Regular expression matching Routes
+
+A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
+For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
+Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
+This feature requires the use of a CurlyRouter.
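+
+A short, hedged illustration (handler names are placeholders) combining a regular expression constraint and a tail-matching wildcard, both handled by the CurlyRouter:
+
+	ws.Route(ws.GET("/persons/{name:[A-Z][A-Z]}").To(findPerson))
+	ws.Route(ws.GET("/static/{subpath:*}").To(serveStaticFile))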
+
+Containers
+
+A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
+Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container.
+The Default container of go-restful uses the http.DefaultServeMux.
+You can create your own Container and create a new http.Server for that particular container.
+
+ container := restful.NewContainer()
+ server := &http.Server{Addr: ":8081", Handler: container}
+
+Filters
+
+A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
+You can use filters to perform generic logging, measurement, authentication, redirects, setting response headers, etc.
+In the restful package there are three hooks into the request,response flow where filters can be added.
+Each filter must define a FilterFunction:
+
+ func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
+
+Use the following statement to pass the request,response pair to the next filter or RouteFunction
+
+ chain.ProcessFilter(req, resp)
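+
+For illustration, a minimal global logging filter could look like this (a sketch; the name and log statement are examples using the standard log package):
+
+	func globalLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
+		log.Printf("[global-filter] %s %s", req.Request.Method, req.Request.URL)
+		chain.ProcessFilter(req, resp) // continue with the next filter or the RouteFunction
+	}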
+
+Container Filters
+
+These are processed before any registered WebService.
+
+ // install a (global) filter for the default container (processed before any webservice)
+ restful.Filter(globalLogging)
+
+WebService Filters
+
+These are processed before any Route of a WebService.
+
+ // install a webservice filter (processed before any route)
+ ws.Filter(webserviceLogging).Filter(measureTime)
+
+
+Route Filters
+
+These are processed before calling the function associated with the Route.
+
+ // install 2 chained route filters (processed before calling findUser)
+ ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
+
+Response Encoding
+
+Two encodings are supported: gzip and deflate. To enable this for all responses:
+
+ restful.DefaultContainer.EnableContentEncoding(true)
+
+If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
+Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
+
+OPTIONS support
+
+By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
+
+ Filter(OPTIONSFilter())
+
+CORS
+
+By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
+
+ cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
+ Filter(cors.Filter)
+
+Error Handling
+
+Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
+For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
+
+ 400: Bad Request
+
+If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
+
+ 404: Not Found
+
+Despite a valid URI, the resource requested may not be available
+
+ 500: Internal Server Error
+
+If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
+
+ 405: Method Not Allowed
+
+The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
+
+ 406: Not Acceptable
+
+The request does not have or has an unknown Accept Header set for this operation.
+
+ 415: Unsupported Media Type
+
+The request does not have or has an unknown Content-Type Header set for this operation.
+
+ServiceError
+
+In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
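+
+For example, a route function can write both the status code and a message; a minimal sketch using WriteErrorString (the same call the default ServiceError handler uses):
+
+	if len(request.PathParameter("user-id")) == 0 {
+		response.WriteErrorString(http.StatusBadRequest, "user-id is required")
+		return
+	}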
+
+Performance options
+
+This package has several options that affect the performance of your service. It is important to understand them and how you can change them.
+
+ restful.DefaultContainer.Router(CurlyRouter{})
+
+The default router is the RouterJSR311 which is an implementation of its spec (http://jsr311.java.net/nonav/releases/1.1/spec/spec.html).
+However, it uses regular expressions for all its routes which, depending on your use case, may consume a significant amount of time.
+The CurlyRouter implementation is a more lightweight alternative that also allows you to use wildcards and expressions, but only if needed.
+
+ restful.DefaultContainer.DoNotRecover(true)
+
+DoNotRecover controls whether panics will be caught to return HTTP 500.
+If set to true, Route functions are responsible for handling any error situation.
+Default value is false; it will recover from panics. This has performance implications.
+
+ restful.SetCacheReadEntity(false)
+
+SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
+If you expect to read large amounts of payload data, and you do not use this feature, you should set it to false.
+
+ restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
+
+If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
+Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation.
+
+Trouble shooting
+
+This package has the means to produce detailed logging of the complete Http request matching process and filter invocation.
+Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) such as:
+
+ restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
+
+Logging
+
+The restful.SetLogger() method allows you to override the logger used by the package. By default restful
+uses the standard library `log` package and logs to stderr. Different logging packages are supported as
+long as they conform to the `StdLogger` interface defined in the `log` sub-package; writing an adapter for your
+preferred package is simple.
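+
+A minimal adapter sketch (the mylog package and its Info method are hypothetical):
+
+ type adapter struct{ backend *mylog.Logger }
+
+ func (a adapter) Print(v ...interface{})                 { a.backend.Info(fmt.Sprint(v...)) }
+ func (a adapter) Printf(format string, v ...interface{}) { a.backend.Info(fmt.Sprintf(format, v...)) }
+
+ restful.SetLogger(adapter{backend: mylog.New()})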
+
+Resources
+
+[project]: https://github.com/emicklei/go-restful
+
+[examples]: https://github.com/emicklei/go-restful/blob/master/examples
+
+[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
+
+[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
+
+(c) 2012-2015, http://ernestmicklei.com. MIT License
+*/
+package restful
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/entity_accessors.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/entity_accessors.go
new file mode 100644
index 0000000..6ecf6c7
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/entity_accessors.go
@@ -0,0 +1,163 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "encoding/json"
+ "encoding/xml"
+ "strings"
+ "sync"
+)
+
+// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
+type EntityReaderWriter interface {
+ // Read a serialized version of the value from the request.
+ // The Request may have a decompressing reader. Depends on Content-Encoding.
+ Read(req *Request, v interface{}) error
+
+ // Write a serialized version of the value on the response.
+ // The Response may have a compressing writer. Depends on Accept-Encoding.
+ // status should be a valid Http Status code
+ Write(resp *Response, status int, v interface{}) error
+}
+
+// entityAccessRegistry is a singleton
+var entityAccessRegistry = &entityReaderWriters{
+ protection: new(sync.RWMutex),
+ accessors: map[string]EntityReaderWriter{},
+}
+
+// entityReaderWriters associates MIME to an EntityReaderWriter
+type entityReaderWriters struct {
+ protection *sync.RWMutex
+ accessors map[string]EntityReaderWriter
+}
+
+func init() {
+ RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
+ RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
+}
+
+// RegisterEntityAccessor adds (or overrides) the EntityReaderWriter for encoding content with this MIME type.
+func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
+ entityAccessRegistry.protection.Lock()
+ defer entityAccessRegistry.protection.Unlock()
+ entityAccessRegistry.accessors[mime] = erw
+}
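+
+// For illustration only (not part of the original file): client code could register the
+// JSON accessor under an additional (hypothetical) vendor MIME type like this:
+//
+//	restful.RegisterEntityAccessor("application/vnd.example+json", restful.NewEntityAccessorJSON("application/vnd.example+json"))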
+
+// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
+// This package is already initialized with such an accessor using the MIME_JSON contentType.
+func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
+ return entityJSONAccess{ContentType: contentType}
+}
+
+// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
+// This package is already initialized with such an accessor using the MIME_XML contentType.
+func NewEntityAccessorXML(contentType string) EntityReaderWriter {
+ return entityXMLAccess{ContentType: contentType}
+}
+
+// accessorAt returns the registered ReaderWriter for this MIME type.
+func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
+ r.protection.RLock()
+ defer r.protection.RUnlock()
+ er, ok := r.accessors[mime]
+ if !ok {
+ // retry with reverse lookup
+ // more expensive but we are in an exceptional situation anyway
+ for k, v := range r.accessors {
+ if strings.Contains(mime, k) {
+ return v, true
+ }
+ }
+ }
+ return er, ok
+}
+
+// entityXMLAccess is an EntityReaderWriter for XML encoding
+type entityXMLAccess struct {
+ // This is used for setting the Content-Type header when writing
+ ContentType string
+}
+
+// Read unmarshalls the value from XML
+func (e entityXMLAccess) Read(req *Request, v interface{}) error {
+ return xml.NewDecoder(req.Request.Body).Decode(v)
+}
+
+// Write marshals the value to XML and sets the Content-Type header.
+func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
+ return writeXML(resp, status, e.ContentType, v)
+}
+
+// writeXML marshals the value to XML and sets the Content-Type header.
+func writeXML(resp *Response, status int, contentType string, v interface{}) error {
+ if v == nil {
+ resp.WriteHeader(status)
+ // do not write a nil representation
+ return nil
+ }
+ if resp.prettyPrint {
+ // pretty output must be created and written explicitly
+ output, err := xml.MarshalIndent(v, " ", " ")
+ if err != nil {
+ return err
+ }
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ _, err = resp.Write([]byte(xml.Header))
+ if err != nil {
+ return err
+ }
+ _, err = resp.Write(output)
+ return err
+ }
+ // not-so-pretty
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ return xml.NewEncoder(resp).Encode(v)
+}
+
+// entityJSONAccess is an EntityReaderWriter for JSON encoding
+type entityJSONAccess struct {
+ // This is used for setting the Content-Type header when writing
+ ContentType string
+}
+
+// Read unmarshalls the value from JSON
+func (e entityJSONAccess) Read(req *Request, v interface{}) error {
+ decoder := json.NewDecoder(req.Request.Body)
+ decoder.UseNumber()
+ return decoder.Decode(v)
+}
+
+// Write marshals the value to JSON and sets the Content-Type header.
+func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
+ return writeJSON(resp, status, e.ContentType, v)
+}
+
+// writeJSON marshals the value to JSON and sets the Content-Type header.
+func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
+ if v == nil {
+ resp.WriteHeader(status)
+ // do not write a nil representation
+ return nil
+ }
+ if resp.prettyPrint {
+ // pretty output must be created and written explicitly
+ output, err := json.MarshalIndent(v, " ", " ")
+ if err != nil {
+ return err
+ }
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ _, err = resp.Write(output)
+ return err
+ }
+ // not-so-pretty
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ return json.NewEncoder(resp).Encode(v)
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/filter.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/filter.go
new file mode 100644
index 0000000..4b86656
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/filter.go
@@ -0,0 +1,26 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
+type FilterChain struct {
+ Filters []FilterFunction // ordered list of FilterFunction
+ Index int // index into filters that is currently in progress
+ Target RouteFunction // function to call after passing all filters
+}
+
+// ProcessFilter passes the request/response pair to the next Filter in the chain, or to the Target when all Filters have run.
+// Each filter can decide to proceed to the next Filter or handle the Response itself.
+func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
+ if f.Index < len(f.Filters) {
+ f.Index++
+ f.Filters[f.Index-1](request, response, f)
+ } else {
+ f.Target(request, response)
+ }
+}
+
+// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
+type FilterFunction func(*Request, *Response, *FilterChain)
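+
+// Illustrative only (not part of the original source): a typical FilterFunction logs the
+// request and then hands control to the rest of the chain (assuming a suitable logger):
+//
+//	func requestLogging(req *Request, resp *Response, chain *FilterChain) {
+//		logger.Printf("%s %s", req.Request.Method, req.Request.URL)
+//		chain.ProcessFilter(req, resp)
+//	}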
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/install.sh b/src/kube2msb/vendor/github.com/emicklei/go-restful/install.sh
new file mode 100644
index 0000000..5fe03b5
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/install.sh
@@ -0,0 +1,9 @@
+cd examples
+ ls *.go | xargs -I {} go build -o /tmp/ignore {}
+ cd ..
+go fmt ...swagger && \
+go test -test.v ...swagger && \
+go install ...swagger && \
+go fmt ...restful && \
+go test -test.v ...restful && \
+go install ...restful \ No newline at end of file
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/jsr311.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/jsr311.go
new file mode 100644
index 0000000..511444a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/jsr311.go
@@ -0,0 +1,248 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "sort"
+)
+
+// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
+// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html.
+// RouterJSR311 implements the Router interface.
+// Concept of locators is not implemented.
+type RouterJSR311 struct{}
+
+// SelectRoute is part of the Router interface and returns the best match
+// for the WebService and its Route for the given Request.
+func (r RouterJSR311) SelectRoute(
+ webServices []*WebService,
+ httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) {
+
+ // Identify the root resource class (WebService)
+ dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices)
+ if err != nil {
+ return nil, nil, NewError(http.StatusNotFound, "")
+ }
+ // Obtain the set of candidate methods (Routes)
+ routes := r.selectRoutes(dispatcher, finalMatch)
+ if len(routes) == 0 {
+ return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+ }
+
+ // Identify the method (Route) that will handle the request
+ route, ok := r.detectRoute(routes, httpRequest)
+ return dispatcher, route, ok
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
+ // http method
+ methodOk := []Route{}
+ for _, each := range routes {
+ if httpRequest.Method == each.Method {
+ methodOk = append(methodOk, each)
+ }
+ }
+ if len(methodOk) == 0 {
+ if trace {
+ traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method)
+ }
+ return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed")
+ }
+ inputMediaOk := methodOk
+
+ // content-type
+ contentType := httpRequest.Header.Get(HEADER_ContentType)
+ inputMediaOk = []Route{}
+ for _, each := range methodOk {
+ if each.matchesContentType(contentType) {
+ inputMediaOk = append(inputMediaOk, each)
+ }
+ }
+ if len(inputMediaOk) == 0 {
+ if trace {
+ traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType)
+ }
+ return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
+ }
+
+ // accept
+ outputMediaOk := []Route{}
+ accept := httpRequest.Header.Get(HEADER_Accept)
+ if len(accept) == 0 {
+ accept = "*/*"
+ }
+ for _, each := range inputMediaOk {
+ if each.matchesAccept(accept) {
+ outputMediaOk = append(outputMediaOk, each)
+ }
+ }
+ if len(outputMediaOk) == 0 {
+ if trace {
+ traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(inputMediaOk), accept)
+ }
+ return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
+ }
+ // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
+ return &outputMediaOk[0], nil
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+// n/m > n/* > */*
+func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route {
+ // TODO
+ return &routes[0]
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2)
+func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route {
+ filtered := &sortableRouteCandidates{}
+ for _, each := range dispatcher.Routes() {
+ pathExpr := each.pathExpr
+ matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder)
+ if matches != nil {
+ lastMatch := matches[len(matches)-1]
+ if len(lastMatch) == 0 || lastMatch == "/" { // include only if the remainder is empty or "/"
+ filtered.candidates = append(filtered.candidates,
+ routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount})
+ }
+ }
+ }
+ if len(filtered.candidates) == 0 {
+ if trace {
+ traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder)
+ }
+ return []Route{}
+ }
+ sort.Sort(sort.Reverse(filtered))
+
+ // select other routes from candidates whose expression matches the path remainder
+ matchingRoutes := []Route{filtered.candidates[0].route}
+ for c := 1; c < len(filtered.candidates); c++ {
+ each := filtered.candidates[c]
+ if each.route.pathExpr.Matcher.MatchString(pathRemainder) {
+ matchingRoutes = append(matchingRoutes, each.route)
+ }
+ }
+ return matchingRoutes
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
+func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) {
+ filtered := &sortableDispatcherCandidates{}
+ for _, each := range dispatchers {
+ matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath)
+ if matches != nil {
+ filtered.candidates = append(filtered.candidates,
+ dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
+ }
+ }
+ if len(filtered.candidates) == 0 {
+ if trace {
+ traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath)
+ }
+ return nil, "", errors.New("not found")
+ }
+ sort.Sort(sort.Reverse(filtered))
+ return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil
+}
+
+// Types and functions to support the sorting of Routes
+
+type routeCandidate struct {
+ route Route
+ matchesCount int // the number of capturing groups
+ literalCount int // the number of literal characters (means those not resulting from template variable substitution)
+ nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’)
+}
+
+func (r routeCandidate) expressionToMatch() string {
+ return r.route.pathExpr.Source
+}
+
+func (r routeCandidate) String() string {
+ return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount)
+}
+
+type sortableRouteCandidates struct {
+ candidates []routeCandidate
+}
+
+func (rcs *sortableRouteCandidates) Len() int {
+ return len(rcs.candidates)
+}
+func (rcs *sortableRouteCandidates) Swap(i, j int) {
+ rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i]
+}
+func (rcs *sortableRouteCandidates) Less(i, j int) bool {
+ ci := rcs.candidates[i]
+ cj := rcs.candidates[j]
+ // primary key
+ if ci.literalCount < cj.literalCount {
+ return true
+ }
+ if ci.literalCount > cj.literalCount {
+ return false
+ }
+ // secondary key
+ if ci.matchesCount < cj.matchesCount {
+ return true
+ }
+ if ci.matchesCount > cj.matchesCount {
+ return false
+ }
+ // tertiary key
+ if ci.nonDefaultCount < cj.nonDefaultCount {
+ return true
+ }
+ if ci.nonDefaultCount > cj.nonDefaultCount {
+ return false
+ }
+ // quaternary key ("source" is interpreted as Path)
+ return ci.route.Path < cj.route.Path
+}
+
+// Types and functions to support the sorting of Dispatchers
+
+type dispatcherCandidate struct {
+ dispatcher *WebService
+ finalMatch string
+ matchesCount int // the number of capturing groups
+ literalCount int // the number of literal characters (means those not resulting from template variable substitution)
+ nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’)
+}
+type sortableDispatcherCandidates struct {
+ candidates []dispatcherCandidate
+}
+
+func (dc *sortableDispatcherCandidates) Len() int {
+ return len(dc.candidates)
+}
+func (dc *sortableDispatcherCandidates) Swap(i, j int) {
+ dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
+}
+func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
+ ci := dc.candidates[i]
+ cj := dc.candidates[j]
+ // primary key
+ if ci.matchesCount < cj.matchesCount {
+ return true
+ }
+ if ci.matchesCount > cj.matchesCount {
+ return false
+ }
+ // secondary key
+ if ci.literalCount < cj.literalCount {
+ return true
+ }
+ if ci.literalCount > cj.literalCount {
+ return false
+ }
+ // tertiary key
+ return ci.nonDefaultCount < cj.nonDefaultCount
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/log/log.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/log/log.go
new file mode 100644
index 0000000..f70d895
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/log/log.go
@@ -0,0 +1,31 @@
+package log
+
+import (
+ stdlog "log"
+ "os"
+)
+
+// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
+type StdLogger interface {
+ Print(v ...interface{})
+ Printf(format string, v ...interface{})
+}
+
+var Logger StdLogger
+
+func init() {
+ // default Logger
+ SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
+}
+
+func SetLogger(customLogger StdLogger) {
+ Logger = customLogger
+}
+
+func Print(v ...interface{}) {
+ Logger.Print(v...)
+}
+
+func Printf(format string, v ...interface{}) {
+ Logger.Printf(format, v...)
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/logger.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/logger.go
new file mode 100644
index 0000000..3f1c4db
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/logger.go
@@ -0,0 +1,32 @@
+package restful
+
+// Copyright 2014 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+import (
+ "github.com/emicklei/go-restful/log"
+)
+
+var trace bool = false
+var traceLogger log.StdLogger
+
+func init() {
+ traceLogger = log.Logger // use the package logger by default
+}
+
+// TraceLogger enables detailed logging of Http request matching and filter invocation. Tracing is disabled by default.
+// You may also call EnableTracing() directly to switch tracing on or off for the package-wide logger.
+func TraceLogger(logger log.StdLogger) {
+ traceLogger = logger
+ EnableTracing(logger != nil)
+}
+
+// expose the setter for the global logger on the top-level package
+func SetLogger(customLogger log.StdLogger) {
+ log.SetLogger(customLogger)
+}
+
+// EnableTracing can be used to turn trace logging on and off.
+func EnableTracing(enabled bool) {
+ trace = enabled
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/mime.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/mime.go
new file mode 100644
index 0000000..d7ea2b6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/mime.go
@@ -0,0 +1,45 @@
+package restful
+
+import (
+ "strconv"
+ "strings"
+)
+
+type mime struct {
+ media string
+ quality float64
+}
+
+// insertMime adds a mime to a list and keeps it sorted by quality.
+func insertMime(l []mime, e mime) []mime {
+ for i, each := range l {
+ // if the new mime has a higher quality than the current one, insert it before
+ if e.quality > each.quality {
+ left := append([]mime{}, l[0:i]...)
+ return append(append(left, e), l[i:]...)
+ }
+ }
+ return append(l, e)
+}
+
+// sortedMimes returns a list of mime sorted (desc) by its specified quality.
+func sortedMimes(accept string) (sorted []mime) {
+ for _, each := range strings.Split(accept, ",") {
+ typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
+ if len(typeAndQuality) == 1 {
+ sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
+ } else {
+ // take factor
+ parts := strings.Split(typeAndQuality[1], "=")
+ if len(parts) == 2 {
+ f, err := strconv.ParseFloat(parts[1], 64)
+ if err != nil {
+ traceLogger.Printf("unable to parse quality in %s, %v", each, err)
+ } else {
+ sorted = insertMime(sorted, mime{typeAndQuality[0], f})
+ }
+ }
+ }
+ }
+ return
+}
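+
+// Illustrative example (not part of the original source): for an Accept header of
+// "text/html;q=0.8, application/json" sortedMimes returns
+// []mime{{"application/json", 1}, {"text/html", 0.8}}, i.e. highest quality first.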
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/options_filter.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/options_filter.go
new file mode 100644
index 0000000..4514ead
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/options_filter.go
@@ -0,0 +1,26 @@
+package restful
+
+import "strings"
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
+// and provides the response with a set of allowed methods for the request URL Path.
+// As for any filter, you can also install it for a particular WebService within a Container.
+// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
+func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
+ if "OPTIONS" != req.Request.Method {
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ","))
+}
+
+// OPTIONSFilter returns a filter function that inspects the Http Request for the OPTIONS method
+// and provides the response with a set of allowed methods for the request URL Path.
+// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
+func OPTIONSFilter() FilterFunction {
+ return DefaultContainer.OPTIONSFilter
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/parameter.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/parameter.go
new file mode 100644
index 0000000..e11c816
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/parameter.go
@@ -0,0 +1,114 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+const (
+ // PathParameterKind = indicator of Request parameter type "path"
+ PathParameterKind = iota
+
+ // QueryParameterKind = indicator of Request parameter type "query"
+ QueryParameterKind
+
+ // BodyParameterKind = indicator of Request parameter type "body"
+ BodyParameterKind
+
+ // HeaderParameterKind = indicator of Request parameter type "header"
+ HeaderParameterKind
+
+ // FormParameterKind = indicator of Request parameter type "form"
+ FormParameterKind
+)
+
+// Parameter is for documenting the parameter used in a Http Request.
+// ParameterData kinds are Path, Query, Body, Header and Form.
+type Parameter struct {
+ data *ParameterData
+}
+
+// ParameterData represents the state of a Parameter.
+// It is made public to make it accessible to e.g. the Swagger package.
+type ParameterData struct {
+ Name, Description, DataType, DataFormat string
+ Kind int
+ Required bool
+ AllowableValues map[string]string
+ AllowMultiple bool
+ DefaultValue string
+}
+
+// Data returns the state of the Parameter
+func (p *Parameter) Data() ParameterData {
+ return *p.data
+}
+
+// Kind returns the parameter type indicator (see const for valid values)
+func (p *Parameter) Kind() int {
+ return p.data.Kind
+}
+
+func (p *Parameter) bePath() *Parameter {
+ p.data.Kind = PathParameterKind
+ return p
+}
+func (p *Parameter) beQuery() *Parameter {
+ p.data.Kind = QueryParameterKind
+ return p
+}
+func (p *Parameter) beBody() *Parameter {
+ p.data.Kind = BodyParameterKind
+ return p
+}
+
+func (p *Parameter) beHeader() *Parameter {
+ p.data.Kind = HeaderParameterKind
+ return p
+}
+
+func (p *Parameter) beForm() *Parameter {
+ p.data.Kind = FormParameterKind
+ return p
+}
+
+// Required sets the required field and returns the receiver
+func (p *Parameter) Required(required bool) *Parameter {
+ p.data.Required = required
+ return p
+}
+
+// AllowMultiple sets the allowMultiple field and returns the receiver
+func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
+ p.data.AllowMultiple = multiple
+ return p
+}
+
+// AllowableValues sets the allowableValues field and returns the receiver
+func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
+ p.data.AllowableValues = values
+ return p
+}
+
+// DataType sets the dataType field and returns the receiver
+func (p *Parameter) DataType(typeName string) *Parameter {
+ p.data.DataType = typeName
+ return p
+}
+
+// DataFormat sets the dataFormat field for Swagger UI
+func (p *Parameter) DataFormat(formatName string) *Parameter {
+ p.data.DataFormat = formatName
+ return p
+}
+
+// DefaultValue sets the default value field and returns the receiver
+func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
+ p.data.DefaultValue = stringRepresentation
+ return p
+}
+
+// Description sets the description value field and returns the receiver
+func (p *Parameter) Description(doc string) *Parameter {
+ p.data.Description = doc
+ return p
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/path_expression.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/path_expression.go
new file mode 100644
index 0000000..a921e6f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/path_expression.go
@@ -0,0 +1,69 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// PathExpression holds a compiled path expression (RegExp) needed to match against
+// Http request paths and to extract path parameter values.
+type pathExpression struct {
+ LiteralCount int // the number of literal characters (means those not resulting from template variable substitution)
+ VarCount int // the number of named parameters (enclosed by {}) in the path
+ Matcher *regexp.Regexp
+ Source string // Path as defined by the RouteBuilder
+ tokens []string
+}
+
+// NewPathExpression creates a PathExpression from the input URL path.
+// Returns an error if the path is invalid.
+func newPathExpression(path string) (*pathExpression, error) {
+ expression, literalCount, varCount, tokens := templateToRegularExpression(path)
+ compiled, err := regexp.Compile(expression)
+ if err != nil {
+ return nil, err
+ }
+ return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
+func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) {
+ var buffer bytes.Buffer
+ buffer.WriteString("^")
+ //tokens = strings.Split(template, "/")
+ tokens = tokenizePath(template)
+ for _, each := range tokens {
+ if each == "" {
+ continue
+ }
+ buffer.WriteString("/")
+ if strings.HasPrefix(each, "{") {
+ // check for regular expression in variable
+ colon := strings.Index(each, ":")
+ if colon != -1 {
+ // extract expression
+ paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
+ if paramExpr == "*" { // special case
+ buffer.WriteString("(.*)")
+ } else {
+ buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
+ }
+ } else {
+ // plain var
+ buffer.WriteString("([^/]+?)")
+ }
+ varCount += 1
+ } else {
+ literalCount += len(each)
+ encoded := each // TODO URI encode
+ buffer.WriteString(regexp.QuoteMeta(encoded))
+ }
+ }
+ return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens
+}
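+
+// Worked example (illustrative, not part of the original source):
+// templateToRegularExpression("/users/{id}") yields the expression
+// "^/users/([^/]+?)(/.*)?$" with literalCount=5 and varCount=1.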
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/request.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/request.go
new file mode 100644
index 0000000..3e42346
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/request.go
@@ -0,0 +1,131 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "compress/zlib"
+ "io/ioutil"
+ "net/http"
+)
+
+var defaultRequestContentType string
+
+var doCacheReadEntityBytes = true
+
+// Request is a wrapper for a http Request that provides convenience methods
+type Request struct {
+ Request *http.Request
+ bodyContent *[]byte // to cache the request body for multiple reads of ReadEntity
+ pathParameters map[string]string
+ attributes map[string]interface{} // for storing request-scoped values
+ selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees
+}
+
+func NewRequest(httpRequest *http.Request) *Request {
+ return &Request{
+ Request: httpRequest,
+ pathParameters: map[string]string{},
+ attributes: map[string]interface{}{},
+ } // empty parameters, attributes
+}
+
+// If ContentType is missing or */* is given then fall back to this type, otherwise
+// an "Unable to unmarshal content of type:" response is returned.
+// Valid values are restful.MIME_JSON and restful.MIME_XML
+// Example:
+// restful.DefaultRequestContentType(restful.MIME_JSON)
+func DefaultRequestContentType(mime string) {
+ defaultRequestContentType = mime
+}
+
+// SetCacheReadEntity controls whether the request body ([]byte) is cached such that ReadEntity is repeatable.
+// Default is true (for backward compatibility). For better performance, you should set it to false if you don't need it.
+func SetCacheReadEntity(doCache bool) {
+ doCacheReadEntityBytes = doCache
+}
+
+// PathParameter accesses the Path parameter value by its name
+func (r *Request) PathParameter(name string) string {
+ return r.pathParameters[name]
+}
+
+// PathParameters accesses the Path parameter values
+func (r *Request) PathParameters() map[string]string {
+ return r.pathParameters
+}
+
+// QueryParameter returns the (first) Query parameter value by its name
+func (r *Request) QueryParameter(name string) string {
+ return r.Request.FormValue(name)
+}
+
+// BodyParameter parses the body of the request (typically once, for a POST or a PUT) and returns the value of the given name or an error.
+func (r *Request) BodyParameter(name string) (string, error) {
+ err := r.Request.ParseForm()
+ if err != nil {
+ return "", err
+ }
+ return r.Request.PostFormValue(name), nil
+}
+
+// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
+func (r *Request) HeaderParameter(name string) string {
+ return r.Request.Header.Get(name)
+}
+
+// ReadEntity checks the Content-Type header and reads the content into the entityPointer.
+func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
+ contentType := r.Request.Header.Get(HEADER_ContentType)
+ contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
+
+ // OLD feature, cache the body for reads
+ if doCacheReadEntityBytes {
+ if r.bodyContent == nil {
+ data, err := ioutil.ReadAll(r.Request.Body)
+ if err != nil {
+ return err
+ }
+ r.bodyContent = &data
+ }
+ r.Request.Body = ioutil.NopCloser(bytes.NewReader(*r.bodyContent))
+ }
+
+ // check if the request body needs decompression
+ if ENCODING_GZIP == contentEncoding {
+ gzipReader := currentCompressorProvider.AcquireGzipReader()
+ defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
+ gzipReader.Reset(r.Request.Body)
+ r.Request.Body = gzipReader
+ } else if ENCODING_DEFLATE == contentEncoding {
+ zlibReader, err := zlib.NewReader(r.Request.Body)
+ if err != nil {
+ return err
+ }
+ r.Request.Body = zlibReader
+ }
+
+ // lookup the EntityReader
+ entityReader, ok := entityAccessRegistry.accessorAt(contentType)
+ if !ok {
+ return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
+ }
+ return entityReader.Read(r, entityPointer)
+}
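+
+// Illustrative usage from within a RouteFunction (not part of the original source;
+// User is a hypothetical struct with JSON or XML tags):
+//
+//	usr := new(User)
+//	if err := request.ReadEntity(usr); err != nil {
+//		response.WriteError(http.StatusBadRequest, err)
+//		return
+//	}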
+
+// SetAttribute adds or replaces the attribute with the given value.
+func (r *Request) SetAttribute(name string, value interface{}) {
+ r.attributes[name] = value
+}
+
+// Attribute returns the value associated to the given name. Returns nil if absent.
+func (r Request) Attribute(name string) interface{} {
+ return r.attributes[name]
+}
+
+// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees
+func (r Request) SelectedRoutePath() string {
+ return r.selectedRoutePath
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/response.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/response.go
new file mode 100644
index 0000000..971cd0b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/response.go
@@ -0,0 +1,235 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "errors"
+ "net/http"
+)
+
+// DEPRECATED, use DefaultResponseContentType(mime)
+var DefaultResponseMimeType string
+
+// PrettyPrintResponses controls the indentation feature of XML and JSON serialization
+var PrettyPrintResponses = true
+
+// Response is a wrapper on the actual http ResponseWriter
+// It provides several convenience methods to prepare and write response content.
+type Response struct {
+ http.ResponseWriter
+ requestAccept string // mime-type what the Http Request says it wants to receive
+ routeProduces []string // mime-types what the Route says it can produce
+ statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200)
+ contentLength int // number of bytes written for the response body
+ prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
+ err error // err property is kept when WriteError is called
+}
+
+// Creates a new response based on a http ResponseWriter.
+func NewResponse(httpWriter http.ResponseWriter) *Response {
+ return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types
+}
+
+// If Accept header matching fails, fall back to this type.
+// Valid values are restful.MIME_JSON and restful.MIME_XML
+// Example:
+// restful.DefaultResponseContentType(restful.MIME_JSON)
+func DefaultResponseContentType(mime string) {
+ DefaultResponseMimeType = mime
+}
+
+// InternalServerError writes the StatusInternalServerError header.
+// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason)
+func (r Response) InternalServerError() Response {
+ r.WriteHeader(http.StatusInternalServerError)
+ return r
+}
+
+// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
+func (r *Response) PrettyPrint(bePretty bool) {
+ r.prettyPrint = bePretty
+}
+
+// AddHeader is a shortcut for .Header().Add(header,value)
+func (r Response) AddHeader(header string, value string) Response {
+ r.Header().Add(header, value)
+ return r
+}
+
+// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
+func (r *Response) SetRequestAccepts(mime string) {
+ r.requestAccept = mime
+}
+
+// EntityWriter returns the registered EntityWriter that the entity (requested resource)
+// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
+// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
+func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
+ sorted := sortedMimes(r.requestAccept)
+ for _, eachAccept := range sorted {
+ for _, eachProduce := range r.routeProduces {
+ if eachProduce == eachAccept.media {
+ if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
+ return w, true
+ }
+ }
+ }
+ if eachAccept.media == "*/*" {
+ for _, each := range r.routeProduces {
+ if w, ok := entityAccessRegistry.accessorAt(each); ok {
+ return w, true
+ }
+ }
+ }
+ }
+ // if requestAccept is empty
+ writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
+ if !ok {
+ // if not registered then fallback to the defaults (if set)
+ if DefaultResponseMimeType == MIME_JSON {
+ return entityAccessRegistry.accessorAt(MIME_JSON)
+ }
+ if DefaultResponseMimeType == MIME_XML {
+ return entityAccessRegistry.accessorAt(MIME_XML)
+ }
+ // Fallback to whatever the route says it can produce.
+ // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+ for _, each := range r.routeProduces {
+ if w, ok := entityAccessRegistry.accessorAt(each); ok {
+ return w, true
+ }
+ }
+ if trace {
+ traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
+ }
+ }
+ return writer, ok
+}
+
+// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200)
+func (r *Response) WriteEntity(value interface{}) error {
+ return r.WriteHeaderAndEntity(http.StatusOK, value)
+}
+
+// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters.
+// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces.
+// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
+// If the value is nil then no response is sent except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead.
+// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written.
+// Current implementation ignores any q-parameters in the Accept Header.
+// Returns an error if the value could not be written on the response.
+func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
+ writer, ok := r.EntityWriter()
+ if !ok {
+ r.WriteHeader(http.StatusNotAcceptable)
+ return nil
+ }
+ return writer.Write(r, status, value)
+}
+
+// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
+// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteAsXml(value interface{}) error {
+ return writeXML(r, http.StatusOK, MIME_XML, value)
+}
+
+// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
+// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
+ return writeXML(r, status, MIME_XML, value)
+}
+
+// WriteAsJson is a convenience method for writing a value in json.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteAsJson(value interface{}) error {
+ return writeJSON(r, http.StatusOK, MIME_JSON, value)
+}
+
+// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteJson(value interface{}, contentType string) error {
+ return writeJSON(r, http.StatusOK, contentType, value)
+}
+
+// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error {
+ return writeJSON(r, status, contentType, value)
+}
+
+// WriteError write the http status and the error string on the response.
+func (r *Response) WriteError(httpStatus int, err error) error {
+ r.err = err
+ return r.WriteErrorString(httpStatus, err.Error())
+}
+
+// WriteServiceError is a convenience method for responding with a status and a ServiceError
+func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
+ r.err = err
+ return r.WriteHeaderAndEntity(httpStatus, err)
+}
+
+// WriteErrorString is a convenience method for an error status with the actual error
+func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
+ if r.err == nil {
+ // if not called from WriteError
+ r.err = errors.New(errorReason)
+ }
+ r.WriteHeader(httpStatus)
+ if _, err := r.Write([]byte(errorReason)); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Flush implements http.Flusher interface, which sends any buffered data to the client.
+func (r *Response) Flush() {
+ if f, ok := r.ResponseWriter.(http.Flusher); ok {
+ f.Flush()
+ } else if trace {
+ traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
+ }
+}
+
+// WriteHeader is overridden to remember the Status Code that has been written.
+// Changes to the Header of the response have no effect after this.
+func (r *Response) WriteHeader(httpStatus int) {
+ r.statusCode = httpStatus
+ r.ResponseWriter.WriteHeader(httpStatus)
+}
+
+// StatusCode returns the code that has been written using WriteHeader.
+func (r Response) StatusCode() int {
+ if 0 == r.statusCode {
+ // no status code has been written yet; assume OK
+ return http.StatusOK
+ }
+ return r.statusCode
+}
+
+// Write writes the data to the connection as part of an HTTP reply.
+// Write is part of http.ResponseWriter interface.
+func (r *Response) Write(bytes []byte) (int, error) {
+ written, err := r.ResponseWriter.Write(bytes)
+ r.contentLength += written
+ return written, err
+}
+
+// ContentLength returns the number of bytes written for the response content.
+// Note that this value is only correct if all data is written through the Response using its Write* methods.
+// Data written directly using the underlying http.ResponseWriter is not accounted for.
+func (r Response) ContentLength() int {
+ return r.contentLength
+}
+
+// CloseNotify is part of http.CloseNotifier interface
+func (r Response) CloseNotify() <-chan bool {
+ return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+// Error returns the err created by WriteError
+func (r Response) Error() error {
+ return r.err
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/route.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/route.go
new file mode 100644
index 0000000..f54e862
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/route.go
@@ -0,0 +1,183 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "net/http"
+ "strings"
+)
+
+// RouteFunction declares the signature of a function that can be bound to a Route.
+type RouteFunction func(*Request, *Response)
+
+// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
+type Route struct {
+ Method string
+ Produces []string
+ Consumes []string
+ Path string // webservice root path + described path
+ Function RouteFunction
+ Filters []FilterFunction
+
+ // cached values for dispatching
+ relativePath string
+ pathParts []string
+ pathExpr *pathExpression // cached compilation of relativePath as RegExp
+
+ // documentation
+ Doc string
+ Notes string
+ Operation string
+ ParameterDocs []*Parameter
+ ResponseErrors map[int]ResponseError
+ ReadSample, WriteSample interface{} // structs that model an example request or response payload
+}
+
+// Initialize for Route
+func (r *Route) postBuild() {
+ r.pathParts = tokenizePath(r.Path)
+}
+
+// Create Request and Response from their http versions
+func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
+ params := r.extractParameters(httpRequest.URL.Path)
+ wrappedRequest := NewRequest(httpRequest)
+ wrappedRequest.pathParameters = params
+ wrappedRequest.selectedRoutePath = r.Path
+ wrappedResponse := NewResponse(httpWriter)
+ wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
+ wrappedResponse.routeProduces = r.Produces
+ return wrappedRequest, wrappedResponse
+}
+
+// dispatchWithFilters calls the function after passing through its own filters
+func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) {
+ if len(r.Filters) > 0 {
+ chain := FilterChain{Filters: r.Filters, Target: r.Function}
+ chain.ProcessFilter(wrappedRequest, wrappedResponse)
+ } else {
+ // unfiltered
+ r.Function(wrappedRequest, wrappedResponse)
+ }
+}
+
+// Return whether the mimeType matches what this Route can produce.
+func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
+ parts := strings.Split(mimeTypesWithQuality, ",")
+ for _, each := range parts {
+ var withoutQuality string
+ if strings.Contains(each, ";") {
+ withoutQuality = strings.Split(each, ";")[0]
+ } else {
+ withoutQuality = each
+ }
+ // trim before compare
+ withoutQuality = strings.Trim(withoutQuality, " ")
+ if withoutQuality == "*/*" {
+ return true
+ }
+ for _, producibleType := range r.Produces {
+ if producibleType == "*/*" || producibleType == withoutQuality {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
+func (r Route) matchesContentType(mimeTypes string) bool {
+
+ if len(r.Consumes) == 0 {
+ // did not specify what it can consume ; any media type (“*/*”) is assumed
+ return true
+ }
+
+ if len(mimeTypes) == 0 {
+ // idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type
+ m := r.Method
+ if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
+ return true
+ }
+ // proceed with default
+ mimeTypes = MIME_OCTET
+ }
+
+ parts := strings.Split(mimeTypes, ",")
+ for _, each := range parts {
+ var contentType string
+ if strings.Contains(each, ";") {
+ contentType = strings.Split(each, ";")[0]
+ } else {
+ contentType = each
+ }
+ // trim before compare
+ contentType = strings.Trim(contentType, " ")
+ for _, consumeableType := range r.Consumes {
+ if consumeableType == "*/*" || consumeableType == contentType {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// Extract the parameters from the request url path
+func (r Route) extractParameters(urlPath string) map[string]string {
+ urlParts := tokenizePath(urlPath)
+ pathParameters := map[string]string{}
+ for i, key := range r.pathParts {
+ var value string
+ if i >= len(urlParts) {
+ value = ""
+ } else {
+ value = urlParts[i]
+ }
+ if strings.HasPrefix(key, "{") { // path-parameter
+ if colon := strings.Index(key, ":"); colon != -1 {
+ // extract by regex
+ regPart := key[colon+1 : len(key)-1]
+ keyPart := key[1:colon]
+ if regPart == "*" {
+ pathParameters[keyPart] = untokenizePath(i, urlParts)
+ break
+ } else {
+ pathParameters[keyPart] = value
+ }
+ } else {
+ // without enclosing {}
+ pathParameters[key[1:len(key)-1]] = value
+ }
+ }
+ }
+ return pathParameters
+}
+
+// Untokenize back into an URL path using the slash separator
+func untokenizePath(offset int, parts []string) string {
+ var buffer bytes.Buffer
+ for p := offset; p < len(parts); p++ {
+ buffer.WriteString(parts[p])
+ // do not end
+ if p < len(parts)-1 {
+ buffer.WriteString("/")
+ }
+ }
+ return buffer.String()
+}
+
+// Tokenize an URL path using the slash separator ; the result does not have empty tokens
+func tokenizePath(path string) []string {
+ if "/" == path {
+ return []string{}
+ }
+ return strings.Split(strings.Trim(path, "/"), "/")
+}
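+
+// Illustrative examples (not part of the original source):
+//
+//	tokenizePath("/users/{id}") // -> []string{"users", "{id}"}
+//	tokenizePath("/")           // -> []string{}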
+
+// for debugging
+func (r Route) String() string {
+ return r.Method + " " + r.Path
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/route_builder.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/route_builder.go
new file mode 100644
index 0000000..8bc1ab6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/route_builder.go
@@ -0,0 +1,240 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+// RouteBuilder is a helper to construct Routes.
+type RouteBuilder struct {
+ rootPath string
+ currentPath string
+ produces []string
+ consumes []string
+ httpMethod string // required
+ function RouteFunction // required
+ filters []FilterFunction
+ // documentation
+ doc string
+ notes string
+ operation string
+ readSample, writeSample interface{}
+ parameters []*Parameter
+ errorMap map[int]ResponseError
+}
+
+// Do evaluates each argument with the RouteBuilder itself.
+// This allows you to follow DRY principles without breaking the fluent programming style.
+// Example:
+// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
+//
+// func Returns500(b *RouteBuilder) {
+// b.Returns(500, "Internal Server Error", restful.ServiceError{})
+// }
+func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
+ for _, each := range oneArgBlocks {
+ each(b)
+ }
+ return b
+}
+
+// To bind the route to a function.
+// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required.
+func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder {
+ b.function = function
+ return b
+}
+
+// Method specifies what HTTP method to match. Required.
+func (b *RouteBuilder) Method(method string) *RouteBuilder {
+ b.httpMethod = method
+ return b
+}
+
+// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header.
+func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder {
+ b.produces = mimeTypes
+ return b
+}
+
+// Consumes specifies what MIME types can be consumed; the Content-Type Http header must match one of these
+func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder {
+ b.consumes = mimeTypes
+ return b
+}
+
+// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/".
+func (b *RouteBuilder) Path(subPath string) *RouteBuilder {
+ b.currentPath = subPath
+ return b
+}
+
+// Doc tells what this route is all about. Optional.
+func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
+ b.doc = documentation
+ return b
+}
+
+// Notes is a verbose explanation of the operation behavior. Optional.
+func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
+ b.notes = notes
+ return b
+}
+
+// Reads tells what resource type will be read from the request payload. Optional.
+// A parameter of type "body" is added, required is set to true and the dataType is set to the qualified name of the sample's type.
+func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder {
+ b.readSample = sample
+ typeAsName := reflect.TypeOf(sample).String()
+ bodyParameter := &Parameter{&ParameterData{Name: "body"}}
+ bodyParameter.beBody()
+ bodyParameter.Required(true)
+ bodyParameter.DataType(typeAsName)
+ b.Param(bodyParameter)
+ return b
+}
+
+// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not found.
+// Use this to modify or extend information for the Parameter (through its Data()).
+func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
+ for _, each := range b.parameters {
+ if each.Data().Name == name {
+ return each
+ }
+ }
+ return p
+}
+
+// Writes tells what resource type will be written as the response payload. Optional.
+func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
+ b.writeSample = sample
+ return b
+}
+
+// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates).
+func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
+ if b.parameters == nil {
+ b.parameters = []*Parameter{}
+ }
+ b.parameters = append(b.parameters, parameter)
+ return b
+}
+
+// Operation allows you to document what the actual method/function call is of the Route.
+// Unless called, the operation name is derived from the RouteFunction set using To(..).
+func (b *RouteBuilder) Operation(name string) *RouteBuilder {
+ b.operation = name
+ return b
+}
+
+// ReturnsError is deprecated, use Returns instead.
+func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder {
+ log.Print("ReturnsError is deprecated, use Returns instead.")
+ return b.Returns(code, message, model)
+}
+
+// Returns allows you to document what responses (errors or regular) can be expected.
+// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
+func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
+ err := ResponseError{
+ Code: code,
+ Message: message,
+ Model: model,
+ }
+ // lazy init because there is no NewRouteBuilder (yet)
+ if b.errorMap == nil {
+ b.errorMap = map[int]ResponseError{}
+ }
+ b.errorMap[code] = err
+ return b
+}
+
+type ResponseError struct {
+ Code int
+ Message string
+ Model interface{}
+}
+
+func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
+ b.rootPath = path
+ return b
+}
+
+// Filter appends a FilterFunction to the end of filters for this Route to build.
+func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
+ b.filters = append(b.filters, filter)
+ return b
+}
+
+// copyDefaults copies the root Produces and Consumes to the Route when the RouteBuilder did not specify them itself.
+func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
+ if len(b.produces) == 0 {
+ b.produces = rootProduces
+ }
+ if len(b.consumes) == 0 {
+ b.consumes = rootConsumes
+ }
+}
+
+// Build creates a new Route using the specification details collected by the RouteBuilder
+func (b *RouteBuilder) Build() Route {
+ pathExpr, err := newPathExpression(b.currentPath)
+ if err != nil {
+ log.Printf("[restful] Invalid path:%s because:%v", b.currentPath, err)
+ os.Exit(1)
+ }
+ if b.function == nil {
+ log.Printf("[restful] No function specified for route:" + b.currentPath)
+ os.Exit(1)
+ }
+ operationName := b.operation
+ if len(operationName) == 0 && b.function != nil {
+ // extract from definition
+ operationName = nameOfFunction(b.function)
+ }
+ route := Route{
+ Method: b.httpMethod,
+ Path: concatPath(b.rootPath, b.currentPath),
+ Produces: b.produces,
+ Consumes: b.consumes,
+ Function: b.function,
+ Filters: b.filters,
+ relativePath: b.currentPath,
+ pathExpr: pathExpr,
+ Doc: b.doc,
+ Notes: b.notes,
+ Operation: operationName,
+ ParameterDocs: b.parameters,
+ ResponseErrors: b.errorMap,
+ ReadSample: b.readSample,
+ WriteSample: b.writeSample}
+ route.postBuild()
+ return route
+}
+
+func concatPath(path1, path2 string) string {
+ return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
+}
+
+// nameOfFunction returns the short name of the function f for documentation.
+// It uses a runtime feature for debugging ; its value may change for later Go versions.
+func nameOfFunction(f interface{}) string {
+ fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())
+ tokenized := strings.Split(fun.Name(), ".")
+ last := tokenized[len(tokenized)-1]
+ last = strings.TrimSuffix(last, ")·fm") // < Go 1.5
+ last = strings.TrimSuffix(last, ")-fm") // Go 1.5
+ last = strings.TrimSuffix(last, "·fm") // < Go 1.5
+ last = strings.TrimSuffix(last, "-fm") // Go 1.5
+ return last
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/router.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/router.go
new file mode 100644
index 0000000..9b32fb6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/router.go
@@ -0,0 +1,18 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import "net/http"
+
+// A RouteSelector finds the best matching Route given the input HTTP Request
+type RouteSelector interface {
+
+ // SelectRoute finds a Route given the input HTTP Request and a list of WebServices.
+ // It returns a selected Route and its containing WebService or an error indicating
+ // a problem.
+ SelectRoute(
+ webServices []*WebService,
+ httpRequest *http.Request) (selectedService *WebService, selected *Route, err error)
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/service_error.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/service_error.go
new file mode 100644
index 0000000..62d1108
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/service_error.go
@@ -0,0 +1,23 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import "fmt"
+
+// ServiceError is a transport object used to pass information about a non-HTTP error that occurred in a WebService while processing a request.
+type ServiceError struct {
+ Code int
+ Message string
+}
+
+// NewError returns a ServiceError using the given code and message.
+func NewError(code int, message string) ServiceError {
+ return ServiceError{Code: code, Message: message}
+}
+
+// Error returns a text representation of the service error
+func (s ServiceError) Error() string {
+ return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message)
+}
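
A short sketch of how a handler typically reports such an error using Response.WriteServiceError (userStore and User are hypothetical application code):

    func findUser(req *restful.Request, resp *restful.Response) {
        id := req.PathParameter("id")
        user, ok := userStore[id] // userStore is a hypothetical map[string]User
        if !ok {
            resp.WriteServiceError(http.StatusNotFound,
                restful.NewError(http.StatusNotFound, "user "+id+" not found"))
            return
        }
        resp.WriteEntity(user)
    }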
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md
new file mode 100644
index 0000000..736f6f3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md
@@ -0,0 +1,43 @@
+Change history of swagger
+=
+2015-10-16
+- add type override mechanism for swagger models (MR 254, nathanejohnson)
+- replace uses of wildcard in generated apidocs (issue 251)
+
+2015-05-25
+- (api break) changed the type of Properties in Model
+- (api break) changed the type of Models in ApiDeclaration
+- (api break) changed the parameter type of PostBuildDeclarationMapFunc
+
+2015-04-09
+- add ModelBuildable interface for customization of Model
+
+2015-03-17
+- preserve order of Routes per WebService in Swagger listing
+- fix use of $ref and type in Swagger models
+- add api version to listing
+
+2014-11-14
+- operation parameters are now sorted using ordering path,query,form,header,body
+
+2014-11-12
+- respect omitempty tag value for embedded structs
+- expose ApiVersion of WebService to Swagger ApiDeclaration
+
+2014-05-29
+- (api add) Ability to define custom http.Handler to serve swagger-ui static files
+
+2014-05-04
+- (fix) include model for array element type of response
+
+2014-01-03
+- (fix) do not add primitive type to the Api models
+
+2013-11-27
+- (fix) make Swagger work for WebServices with root ("/" or "") paths
+
+2013-10-29
+- (api add) package variable LogInfo to customize logging function
+
+2013-10-15
+- upgraded to spec version 1.2 (https://github.com/wordnik/swagger-core/wiki/1.2-transition) \ No newline at end of file
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/README.md b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/README.md
new file mode 100644
index 0000000..6c27c30
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/README.md
@@ -0,0 +1,76 @@
+How to use Swagger UI with go-restful
+=
+
+Get the Swagger UI sources (version 1.2 only)
+
+ git clone https://github.com/wordnik/swagger-ui.git
+
+The project contains a "dist" folder.
+Its contents include all the Swagger UI files you need.
+
+The `index.html` has a `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`.
+You need to change that to match your WebService JSON endpoint, e.g. `http://localhost:8080/apidocs.json`.
+
+Now, you can install the Swagger WebService for serving the Swagger specification in JSON.
+
+ config := swagger.Config{
+ WebServices: restful.RegisteredWebServices(),
+ ApiPath: "/apidocs.json",
+ SwaggerPath: "/apidocs/",
+ SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
+ swagger.InstallSwaggerService(config)
+
+
+Documenting Structs
+--
+
+Currently there are two ways to document your structs for the go-restful Swagger package.
+
+###### By using struct tags
+- Use tag "description" to annotate a struct field with a description to show in the UI
+- Use tag "modelDescription" to annotate the struct itself with a description to show in the UI. The tag can be added to any field of the struct; if there are multiple definitions, they are appended, separated by an empty line.
+
+###### By using the SwaggerDoc method
+Here is an example with an `Address` struct and the documentation for each of the fields. The `""` is a special entry for **documenting the struct itself**.
+
+ type Address struct {
+ Country string `json:"country,omitempty"`
+ PostCode int `json:"postcode,omitempty"`
+ }
+
+ func (Address) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "Address doc",
+ "country": "Country doc",
+ "postcode": "PostCode doc",
+ }
+ }
+
+This example will generate JSON like this:
+
+ {
+ "Address": {
+ "id": "Address",
+ "description": "Address doc",
+ "properties": {
+ "country": {
+ "type": "string",
+ "description": "Country doc"
+ },
+ "postcode": {
+ "type": "integer",
+ "format": "int32",
+ "description": "PostCode doc"
+ }
+ }
+ }
+ }
+
+**Very Important Notes:**
+- `SwaggerDoc()` uses a **non-pointer** receiver (e.g. `func (Address)`, not `func (*Address)`)
+- The returned map must use the JSON field name as its key (e.g. `"postcode"`, not `"PostCode"`)
+
+Notes
+--
+- The Nickname of an Operation is automatically derived from the name of the handler function. You can override it using RouteBuilder.Operation(..)
+- The WebServices field of swagger.Config can be used to control which services you want to expose and document; you can have multiple configs and therefore multiple endpoints (a minimal sketch follows below).
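
A minimal sketch combining both notes, assuming `findUser` and `User` are hypothetical application code:

    userWs := new(restful.WebService)
    userWs.Path("/users").Produces(restful.MIME_JSON)
    userWs.Route(userWs.GET("/{id}").To(findUser).
        Operation("getUser"). // overrides the nickname derived from the function name
        Writes(User{}))

    config := swagger.Config{
        WebServices: []*restful.WebService{userWs}, // document only this service
        ApiPath:     "/apidocs.json",
    }
    swagger.RegisterSwaggerService(config, restful.DefaultContainer)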
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go
new file mode 100644
index 0000000..9f4c369
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go
@@ -0,0 +1,64 @@
+package swagger
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+// ApiDeclarationList maintains an ordered list of ApiDeclaration.
+type ApiDeclarationList struct {
+ List []ApiDeclaration
+}
+
+// At returns the ApiDeclaration with the given path; ok is false if it is absent.
+func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) {
+ for _, each := range l.List {
+ if each.ResourcePath == path {
+ return each, true
+ }
+ }
+ return a, false
+}
+
+// Put adds or replaces an ApiDeclaration for this path.
+func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) {
+ // maybe replace existing
+ for i, each := range l.List {
+ if each.ResourcePath == path {
+ // replace
+ l.List[i] = a
+ return
+ }
+ }
+ // add
+ l.List = append(l.List, a)
+}
+
+// Do enumerates all the ApiDeclarations, each with its resource path.
+func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) {
+ for _, each := range l.List {
+ block(each.ResourcePath, each)
+ }
+}
+
+// MarshalJSON writes the ApiDeclarationList as if it was a map[string]ApiDeclaration
+func (l ApiDeclarationList) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ encoder := json.NewEncoder(&buf)
+ buf.WriteString("{\n")
+ for i, each := range l.List {
+ buf.WriteString("\"")
+ buf.WriteString(each.ResourcePath)
+ buf.WriteString("\": ")
+ encoder.Encode(each)
+ if i < len(l.List)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+ buf.WriteString("}")
+ return buf.Bytes(), nil
+}
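
The point of keeping a slice instead of a plain map is that insertion order is preserved in the marshaled output; a small sketch of that behaviour:

    var decls swagger.ApiDeclarationList
    decls.Put("/users", swagger.ApiDeclaration{ResourcePath: "/users"})
    decls.Put("/orders", swagger.ApiDeclaration{ResourcePath: "/orders"})

    data, _ := decls.MarshalJSON()
    // insertion order is preserved in the output:
    // {"/users": {...}, "/orders": {...}}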
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/config.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/config.go
new file mode 100644
index 0000000..510d6fc
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/config.go
@@ -0,0 +1,38 @@
+package swagger
+
+import (
+ "net/http"
+
+ "github.com/emicklei/go-restful"
+)
+
+// PostBuildDeclarationMapFunc can be used to modify the api declaration map.
+type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList)
+
+type MapSchemaFormatFunc func(typeName string) string
+
+type Config struct {
+ // url where the services are available, e.g. http://localhost:8080
+ // if left empty then the basePath of Swagger is taken from the actual request
+ WebServicesUrl string
+ // path where the JSON api is available, e.g. /apidocs
+ ApiPath string
+ // [optional] path where the swagger UI will be served, e.g. /swagger
+ SwaggerPath string
+ // [optional] location of folder containing Swagger HTML5 application index.html
+ SwaggerFilePath string
+ // api listing is constructed from this list of restful WebServices.
+ WebServices []*restful.WebService
+ // will serve all static content (scripts,pages,images)
+ StaticHandler http.Handler
+ // [optional] by default CORS (Cross-Origin Resource Sharing) is enabled.
+ DisableCORS bool
+ // Top-level API version. Is reflected in the resource listing.
+ ApiVersion string
+ // If set then call this handler after building the complete ApiDeclaration Map
+ PostBuildHandler PostBuildDeclarationMapFunc
+ // Swagger global info struct
+ Info Info
+ // [optional] If set, the model builder calls this handler for additional typename-to-swagger-format conversions.
+ SchemaFormatHandler MapSchemaFormatFunc
+}
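
An illustrative Config covering the optional hooks; mytypes.Timestamp is a hypothetical custom type, and the handlers shown are optional:

    config := swagger.Config{
        WebServicesUrl: "http://localhost:8080",
        ApiPath:        "/apidocs.json",
        ApiVersion:     "1.0.0",
        Info: swagger.Info{
            Title:       "User service",
            Description: "manages users",
        },
        WebServices: restful.RegisteredWebServices(),
        // called once after the complete ApiDeclaration map has been built
        PostBuildHandler: func(decls *swagger.ApiDeclarationList) {
            decls.Do(func(path string, decl swagger.ApiDeclaration) {
                // inspect or tweak each declaration here
            })
        },
        // map additional type names to a swagger format; "" means no format
        SchemaFormatHandler: func(typeName string) string {
            if typeName == "mytypes.Timestamp" { // hypothetical custom type
                return "date-time"
            }
            return ""
        },
    }
    swagger.InstallSwaggerService(config)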
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_builder.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_builder.go
new file mode 100644
index 0000000..696d192
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_builder.go
@@ -0,0 +1,436 @@
+package swagger
+
+import (
+ "encoding/json"
+ "reflect"
+ "strings"
+)
+
+// ModelBuildable is used for extending Structs that need more control over
+// how the Model appears in the Swagger api declaration.
+type ModelBuildable interface {
+ PostBuildModel(m *Model) *Model
+}
+
+type modelBuilder struct {
+ Models *ModelList
+ Config *Config
+}
+
+type documentable interface {
+ SwaggerDoc() map[string]string
+}
+
+// Check if this structure has a method with signature func (<theModel>) SwaggerDoc() map[string]string
+// If it exists, retrieve the documentation and overwrite all struct tag descriptions
+func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string {
+ if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok {
+ return docable.SwaggerDoc()
+ }
+ return make(map[string]string)
+}
+
+// addModelFrom creates and adds a Model to the builder and detects and calls
+// the post build hook for customizations
+func (b modelBuilder) addModelFrom(sample interface{}) {
+ if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil {
+ // allow customizations
+ if buildable, ok := sample.(ModelBuildable); ok {
+ modelOrNil = buildable.PostBuildModel(modelOrNil)
+ b.Models.Put(modelOrNil.Id, *modelOrNil)
+ }
+ }
+}
+
+func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
+ modelName := b.keyFrom(st)
+ if nameOverride != "" {
+ modelName = nameOverride
+ }
+ // no models needed for primitive types
+ if b.isPrimitiveType(modelName) {
+ return nil
+ }
+ // see if we already have visited this model
+ if _, ok := b.Models.At(modelName); ok {
+ return nil
+ }
+ sm := Model{
+ Id: modelName,
+ Required: []string{},
+ Properties: ModelPropertyList{}}
+
+ // reference the model before further initializing (enables recursive structs)
+ b.Models.Put(modelName, sm)
+
+ // check for slice or array
+ if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
+ b.addModel(st.Elem(), "")
+ return &sm
+ }
+ // check for structure or primitive type
+ if st.Kind() != reflect.Struct {
+ return &sm
+ }
+
+ fullDoc := getDocFromMethodSwaggerDoc2(st)
+ modelDescriptions := []string{}
+
+ for i := 0; i < st.NumField(); i++ {
+ field := st.Field(i)
+ jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName)
+ if len(modelDescription) > 0 {
+ modelDescriptions = append(modelDescriptions, modelDescription)
+ }
+
+ // add if not omitted
+ if len(jsonName) != 0 {
+ // update description
+ if fieldDoc, ok := fullDoc[jsonName]; ok {
+ prop.Description = fieldDoc
+ }
+ // update Required
+ if b.isPropertyRequired(field) {
+ sm.Required = append(sm.Required, jsonName)
+ }
+ sm.Properties.Put(jsonName, prop)
+ }
+ }
+
+ // We always overwrite documentation if SwaggerDoc method exists
+ // "" is special for documenting the struct itself
+ if modelDoc, ok := fullDoc[""]; ok {
+ sm.Description = modelDoc
+ } else if len(modelDescriptions) != 0 {
+ sm.Description = strings.Join(modelDescriptions, "\n")
+ }
+
+ // update model builder with completed model
+ b.Models.Put(modelName, sm)
+
+ return &sm
+}
+
+func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool {
+ required := true
+ if jsonTag := field.Tag.Get("json"); jsonTag != "" {
+ s := strings.Split(jsonTag, ",")
+ if len(s) > 1 && s[1] == "omitempty" {
+ return false
+ }
+ }
+ return required
+}
+
+func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) {
+ jsonName = b.jsonNameOfField(field)
+ if len(jsonName) == 0 {
+ // empty name signals skip property
+ return "", "", prop
+ }
+
+ if tag := field.Tag.Get("modelDescription"); tag != "" {
+ modelDescription = tag
+ }
+
+ prop.setPropertyMetadata(field)
+ if prop.Type != nil {
+ return jsonName, modelDescription, prop
+ }
+ fieldType := field.Type
+
+ // check if type is doing its own marshalling
+ marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem()
+ if fieldType.Implements(marshalerType) {
+ var pType = "string"
+ if prop.Type == nil {
+ prop.Type = &pType
+ }
+ if prop.Format == "" {
+ prop.Format = b.jsonSchemaFormat(fieldType.String())
+ }
+ return jsonName, modelDescription, prop
+ }
+
+ // check if annotation says it is a string
+ if jsonTag := field.Tag.Get("json"); jsonTag != "" {
+ s := strings.Split(jsonTag, ",")
+ if len(s) > 1 && s[1] == "string" {
+ stringt := "string"
+ prop.Type = &stringt
+ return jsonName, modelDescription, prop
+ }
+ }
+
+ fieldKind := fieldType.Kind()
+ switch {
+ case fieldKind == reflect.Struct:
+ jsonName, prop := b.buildStructTypeProperty(field, jsonName, model)
+ return jsonName, modelDescription, prop
+ case fieldKind == reflect.Slice || fieldKind == reflect.Array:
+ jsonName, prop := b.buildArrayTypeProperty(field, jsonName, modelName)
+ return jsonName, modelDescription, prop
+ case fieldKind == reflect.Ptr:
+ jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName)
+ return jsonName, modelDescription, prop
+ case fieldKind == reflect.String:
+ stringt := "string"
+ prop.Type = &stringt
+ return jsonName, modelDescription, prop
+ case fieldKind == reflect.Map:
+ // if it's a map, it's unstructured, and swagger 1.2 can't handle it
+ objectType := "object"
+ prop.Type = &objectType
+ return jsonName, modelDescription, prop
+ }
+
+ if b.isPrimitiveType(fieldType.String()) {
+ mapped := b.jsonSchemaType(fieldType.String())
+ prop.Type = &mapped
+ prop.Format = b.jsonSchemaFormat(fieldType.String())
+ return jsonName, modelDescription, prop
+ }
+ modelType := fieldType.String()
+ prop.Ref = &modelType
+
+ if fieldType.Name() == "" { // override type of anonymous structs
+ nestedTypeName := modelName + "." + jsonName
+ prop.Ref = &nestedTypeName
+ b.addModel(fieldType, nestedTypeName)
+ }
+ return jsonName, modelDescription, prop
+}
+
+func hasNamedJSONTag(field reflect.StructField) bool {
+ parts := strings.Split(field.Tag.Get("json"), ",")
+ if len(parts) == 0 {
+ return false
+ }
+ for _, s := range parts[1:] {
+ if s == "inline" {
+ return false
+ }
+ }
+ return len(parts[0]) > 0
+}
+
+func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) {
+ prop.setPropertyMetadata(field)
+ // Check for type override in tag
+ if prop.Type != nil {
+ return jsonName, prop
+ }
+ fieldType := field.Type
+ // check for anonymous
+ if len(fieldType.Name()) == 0 {
+ // anonymous
+ anonType := model.Id + "." + jsonName
+ b.addModel(fieldType, anonType)
+ prop.Ref = &anonType
+ return jsonName, prop
+ }
+
+ if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) {
+ // embedded struct
+ sub := modelBuilder{new(ModelList), b.Config}
+ sub.addModel(fieldType, "")
+ subKey := sub.keyFrom(fieldType)
+ // merge properties from sub
+ subModel, _ := sub.Models.At(subKey)
+ subModel.Properties.Do(func(k string, v ModelProperty) {
+ model.Properties.Put(k, v)
+ // if subModel says this property is required then include it
+ required := false
+ for _, each := range subModel.Required {
+ if k == each {
+ required = true
+ break
+ }
+ }
+ if required {
+ model.Required = append(model.Required, k)
+ }
+ })
+ // add all new referenced models
+ sub.Models.Do(func(key string, sub Model) {
+ if key != subKey {
+ if _, ok := b.Models.At(key); !ok {
+ b.Models.Put(key, sub)
+ }
+ }
+ })
+ // empty name signals skip property
+ return "", prop
+ }
+ // simple struct
+ b.addModel(fieldType, "")
+ var pType = fieldType.String()
+ prop.Ref = &pType
+ return jsonName, prop
+}
+
+func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
+ // check for type override in tags
+ prop.setPropertyMetadata(field)
+ if prop.Type != nil {
+ return jsonName, prop
+ }
+ fieldType := field.Type
+ var pType = "array"
+ prop.Type = &pType
+ isPrimitive := b.isPrimitiveType(fieldType.Elem().Name())
+ elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem())
+ prop.Items = new(Item)
+ if isPrimitive {
+ mapped := b.jsonSchemaType(elemTypeName)
+ prop.Items.Type = &mapped
+ } else {
+ prop.Items.Ref = &elemTypeName
+ }
+ // add|overwrite model for element type
+ if fieldType.Elem().Kind() == reflect.Ptr {
+ fieldType = fieldType.Elem()
+ }
+ if !isPrimitive {
+ b.addModel(fieldType.Elem(), elemTypeName)
+ }
+ return jsonName, prop
+}
+
+func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
+ prop.setPropertyMetadata(field)
+ // Check for type override in tags
+ if prop.Type != nil {
+ return jsonName, prop
+ }
+ fieldType := field.Type
+
+ // override type of pointer to list-likes
+ if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array {
+ var pType = "array"
+ prop.Type = &pType
+ isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name())
+ elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem())
+ if isPrimitive {
+ primName := b.jsonSchemaType(elemName)
+ prop.Items = &Item{Ref: &primName}
+ } else {
+ prop.Items = &Item{Ref: &elemName}
+ }
+ if !isPrimitive {
+ // add|overwrite model for element type
+ b.addModel(fieldType.Elem().Elem(), elemName)
+ }
+ } else {
+ // non-array, pointer type
+ var pType = b.jsonSchemaType(fieldType.String()[1:]) // no star, include pkg path
+ if b.isPrimitiveType(fieldType.String()[1:]) {
+ prop.Type = &pType
+ prop.Format = b.jsonSchemaFormat(fieldType.String()[1:])
+ return jsonName, prop
+ }
+ prop.Ref = &pType
+ elemName := ""
+ if fieldType.Elem().Name() == "" {
+ elemName = modelName + "." + jsonName
+ prop.Ref = &elemName
+ }
+ b.addModel(fieldType.Elem(), elemName)
+ }
+ return jsonName, prop
+}
+
+func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string {
+ if t.Kind() == reflect.Ptr {
+ return t.String()[1:]
+ }
+ if t.Name() == "" {
+ return modelName + "." + jsonName
+ }
+ return b.keyFrom(t)
+}
+
+func (b modelBuilder) keyFrom(st reflect.Type) string {
+ key := st.String()
+ if len(st.Name()) == 0 { // unnamed type
+ // Swagger UI has special meaning for [
+ key = strings.Replace(key, "[]", "||", -1)
+ }
+ return key
+}
+
+// see also https://golang.org/ref/spec#Numeric_types
+func (b modelBuilder) isPrimitiveType(modelName string) bool {
+ if len(modelName) == 0 {
+ return false
+ }
+ return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName)
+}
+
+// jsonNameOfField returns the name of the field as it should appear in JSON format
+// An empty string indicates that this field is not part of the JSON representation
+func (b modelBuilder) jsonNameOfField(field reflect.StructField) string {
+ if jsonTag := field.Tag.Get("json"); jsonTag != "" {
+ s := strings.Split(jsonTag, ",")
+ if s[0] == "-" {
+ // empty name signals skip property
+ return ""
+ } else if s[0] != "" {
+ return s[0]
+ }
+ }
+ return field.Name
+}
+
+// see also http://json-schema.org/latest/json-schema-core.html#anchor8
+func (b modelBuilder) jsonSchemaType(modelName string) string {
+ schemaMap := map[string]string{
+ "uint": "integer",
+ "uint8": "integer",
+ "uint16": "integer",
+ "uint32": "integer",
+ "uint64": "integer",
+
+ "int": "integer",
+ "int8": "integer",
+ "int16": "integer",
+ "int32": "integer",
+ "int64": "integer",
+
+ "byte": "integer",
+ "float64": "number",
+ "float32": "number",
+ "bool": "boolean",
+ "time.Time": "string",
+ }
+ mapped, ok := schemaMap[modelName]
+ if !ok {
+ return modelName // use as is (custom or struct)
+ }
+ return mapped
+}
+
+func (b modelBuilder) jsonSchemaFormat(modelName string) string {
+ if b.Config != nil && b.Config.SchemaFormatHandler != nil {
+ if mapped := b.Config.SchemaFormatHandler(modelName); mapped != "" {
+ return mapped
+ }
+ }
+ schemaMap := map[string]string{
+ "int": "int32",
+ "int32": "int32",
+ "int64": "int64",
+ "byte": "byte",
+ "uint": "integer",
+ "uint8": "byte",
+ "float64": "double",
+ "float32": "float",
+ "time.Time": "date-time",
+ "*time.Time": "date-time",
+ }
+ mapped, ok := schemaMap[modelName]
+ if !ok {
+ return "" // no format
+ }
+ return mapped
+}
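
An illustrative use of the ModelBuildable hook declared above; Money is a hypothetical sample type whose generated model is adjusted after the reflective build:

    // Money implements ModelBuildable, so addModelFrom calls PostBuildModel
    // after the model has been built from the struct fields.
    type Money struct {
        Amount   float64 `json:"amount"`
        Currency string  `json:"currency"`
    }

    func (m Money) PostBuildModel(model *swagger.Model) *swagger.Model {
        model.Description = "an amount of money in a given currency"
        if prop, ok := model.Properties.At("currency"); ok {
            prop.Enum = []string{"EUR", "USD"} // narrow the allowed values
            model.Properties.Put("currency", prop)
        }
        return model
    }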
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_list.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_list.go
new file mode 100644
index 0000000..9bb6cb6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_list.go
@@ -0,0 +1,86 @@
+package swagger
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+// NamedModel associates a name with a Model (not using its Id)
+type NamedModel struct {
+ Name string
+ Model Model
+}
+
+// ModelList encapsulates a list of NamedModel (association)
+type ModelList struct {
+ List []NamedModel
+}
+
+// Put adds or replaces a Model by its name
+func (l *ModelList) Put(name string, model Model) {
+ for i, each := range l.List {
+ if each.Name == name {
+ // replace
+ l.List[i] = NamedModel{name, model}
+ return
+ }
+ }
+ // add
+ l.List = append(l.List, NamedModel{name, model})
+}
+
+// At returns a Model by its name, ok is false if absent
+func (l *ModelList) At(name string) (m Model, ok bool) {
+ for _, each := range l.List {
+ if each.Name == name {
+ return each.Model, true
+ }
+ }
+ return m, false
+}
+
+// Do enumerates all the models, each with its assigned name
+func (l *ModelList) Do(block func(name string, value Model)) {
+ for _, each := range l.List {
+ block(each.Name, each.Model)
+ }
+}
+
+// MarshalJSON writes the ModelList as if it was a map[string]Model
+func (l ModelList) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ encoder := json.NewEncoder(&buf)
+ buf.WriteString("{\n")
+ for i, each := range l.List {
+ buf.WriteString("\"")
+ buf.WriteString(each.Name)
+ buf.WriteString("\": ")
+ encoder.Encode(each.Model)
+ if i < len(l.List)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+ buf.WriteString("}")
+ return buf.Bytes(), nil
+}
+
+// UnmarshalJSON reads back a ModelList. This is an expensive operation.
+func (l *ModelList) UnmarshalJSON(data []byte) error {
+ raw := map[string]interface{}{}
+ json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
+ for k, v := range raw {
+ // produces JSON bytes for each value
+ data, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+ var m Model
+ json.NewDecoder(bytes.NewReader(data)).Decode(&m)
+ l.Put(k, m)
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go
new file mode 100644
index 0000000..04fff2c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go
@@ -0,0 +1,66 @@
+package swagger
+
+import (
+ "reflect"
+ "strings"
+)
+
+func (prop *ModelProperty) setDescription(field reflect.StructField) {
+ if tag := field.Tag.Get("description"); tag != "" {
+ prop.Description = tag
+ }
+}
+
+func (prop *ModelProperty) setDefaultValue(field reflect.StructField) {
+ if tag := field.Tag.Get("default"); tag != "" {
+ prop.DefaultValue = Special(tag)
+ }
+}
+
+func (prop *ModelProperty) setEnumValues(field reflect.StructField) {
+ // We use | to separate the enum values. This value is chosen
+ // since it's unlikely to be useful in actual enumeration values.
+ if tag := field.Tag.Get("enum"); tag != "" {
+ prop.Enum = strings.Split(tag, "|")
+ }
+}
+
+func (prop *ModelProperty) setMaximum(field reflect.StructField) {
+ if tag := field.Tag.Get("maximum"); tag != "" {
+ prop.Maximum = tag
+ }
+}
+
+func (prop *ModelProperty) setType(field reflect.StructField) {
+ if tag := field.Tag.Get("type"); tag != "" {
+ prop.Type = &tag
+ }
+}
+
+func (prop *ModelProperty) setMinimum(field reflect.StructField) {
+ if tag := field.Tag.Get("minimum"); tag != "" {
+ prop.Minimum = tag
+ }
+}
+
+func (prop *ModelProperty) setUniqueItems(field reflect.StructField) {
+ tag := field.Tag.Get("unique")
+ switch tag {
+ case "true":
+ v := true
+ prop.UniqueItems = &v
+ case "false":
+ v := false
+ prop.UniqueItems = &v
+ }
+}
+
+func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) {
+ prop.setDescription(field)
+ prop.setEnumValues(field)
+ prop.setMinimum(field)
+ prop.setMaximum(field)
+ prop.setUniqueItems(field)
+ prop.setDefaultValue(field)
+ prop.setType(field)
+}
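
An illustrative struct showing the tags read by setPropertyMetadata (Ticket is a hypothetical sample type):

    type Ticket struct {
        State    string   `json:"state" description:"current state of the ticket" enum:"open|closed" default:"open"`
        Priority int      `json:"priority" description:"1 (low) .. 5 (high)" minimum:"1" maximum:"5"`
        Labels   []string `json:"labels,omitempty" unique:"true"`
        Created  string   `json:"created" type:"string"` // force the swagger type
    }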
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go
new file mode 100644
index 0000000..3babb19
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go
@@ -0,0 +1,87 @@
+package swagger
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+// NamedModelProperty associates a name to a ModelProperty
+type NamedModelProperty struct {
+ Name string
+ Property ModelProperty
+}
+
+// ModelPropertyList encapsulates a list of NamedModelProperty (association)
+type ModelPropertyList struct {
+ List []NamedModelProperty
+}
+
+// At returns the ModelProperty with the given name; ok is false if it is absent.
+func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) {
+ for _, each := range l.List {
+ if each.Name == name {
+ return each.Property, true
+ }
+ }
+ return p, false
+}
+
+// Put adds or replaces a ModelProperty with this name
+func (l *ModelPropertyList) Put(name string, prop ModelProperty) {
+ // maybe replace existing
+ for i, each := range l.List {
+ if each.Name == name {
+ // replace
+ l.List[i] = NamedModelProperty{Name: name, Property: prop}
+ return
+ }
+ }
+ // add
+ l.List = append(l.List, NamedModelProperty{Name: name, Property: prop})
+}
+
+// Do enumerates all the properties, each with its assigned name
+func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) {
+ for _, each := range l.List {
+ block(each.Name, each.Property)
+ }
+}
+
+// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
+func (l ModelPropertyList) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ encoder := json.NewEncoder(&buf)
+ buf.WriteString("{\n")
+ for i, each := range l.List {
+ buf.WriteString("\"")
+ buf.WriteString(each.Name)
+ buf.WriteString("\": ")
+ encoder.Encode(each.Property)
+ if i < len(l.List)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+ buf.WriteString("}")
+ return buf.Bytes(), nil
+}
+
+// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
+func (l *ModelPropertyList) UnmarshalJSON(data []byte) error {
+ raw := map[string]interface{}{}
+ json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
+ for k, v := range raw {
+ // produces JSON bytes for each value
+ data, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+ var m ModelProperty
+ json.NewDecoder(bytes.NewReader(data)).Decode(&m)
+ l.Put(k, m)
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go
new file mode 100644
index 0000000..b33ccfb
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go
@@ -0,0 +1,36 @@
+package swagger
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import "github.com/emicklei/go-restful"
+
+type orderedRouteMap struct {
+ elements map[string][]restful.Route
+ keys []string
+}
+
+func newOrderedRouteMap() *orderedRouteMap {
+ return &orderedRouteMap{
+ elements: map[string][]restful.Route{},
+ keys: []string{},
+ }
+}
+
+func (o *orderedRouteMap) Add(key string, route restful.Route) {
+ routes, ok := o.elements[key]
+ if ok {
+ routes = append(routes, route)
+ o.elements[key] = routes
+ return
+ }
+ o.elements[key] = []restful.Route{route}
+ o.keys = append(o.keys, key)
+}
+
+func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) {
+ for _, k := range o.keys {
+ block(k, o.elements[k])
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger.go
new file mode 100644
index 0000000..967b671
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger.go
@@ -0,0 +1,184 @@
+// Package swagger implements the structures of the Swagger 1.2 specification:
+// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md
+package swagger
+
+const swaggerVersion = "1.2"
+
+// 4.3.3 Data Type Fields
+type DataTypeFields struct {
+ Type *string `json:"type,omitempty"` // if Ref not used
+ Ref *string `json:"$ref,omitempty"` // if Type not used
+ Format string `json:"format,omitempty"`
+ DefaultValue Special `json:"defaultValue,omitempty"`
+ Enum []string `json:"enum,omitempty"`
+ Minimum string `json:"minimum,omitempty"`
+ Maximum string `json:"maximum,omitempty"`
+ Items *Item `json:"items,omitempty"`
+ UniqueItems *bool `json:"uniqueItems,omitempty"`
+}
+
+type Special string
+
+// 4.3.4 Items Object
+type Item struct {
+ Type *string `json:"type,omitempty"`
+ Ref *string `json:"$ref,omitempty"`
+ Format string `json:"format,omitempty"`
+}
+
+// 5.1 Resource Listing
+type ResourceListing struct {
+ SwaggerVersion string `json:"swaggerVersion"` // e.g 1.2
+ Apis []Resource `json:"apis"`
+ ApiVersion string `json:"apiVersion"`
+ Info Info `json:"info"`
+ Authorizations []Authorization `json:"authorizations,omitempty"`
+}
+
+// 5.1.2 Resource Object
+type Resource struct {
+ Path string `json:"path"` // relative or absolute, must start with /
+ Description string `json:"description"`
+}
+
+// 5.1.3 Info Object
+type Info struct {
+ Title string `json:"title"`
+ Description string `json:"description"`
+ TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"`
+ Contact string `json:"contact,omitempty"`
+ License string `json:"license,omitempty"`
+ LicenseUrl string `json:"licenseUrl,omitempty"`
+}
+
+// 5.1.5
+type Authorization struct {
+ Type string `json:"type"`
+ PassAs string `json:"passAs"`
+ Keyname string `json:"keyname"`
+ Scopes []Scope `json:"scopes"`
+ GrantTypes []GrantType `json:"grandTypes"`
+}
+
+// 5.1.6, 5.2.11
+type Scope struct {
+ // Required. The name of the scope.
+ Scope string `json:"scope"`
+ // Recommended. A short description of the scope.
+ Description string `json:"description"`
+}
+
+// 5.1.7
+type GrantType struct {
+ Implicit Implicit `json:"implicit"`
+ AuthorizationCode AuthorizationCode `json:"authorization_code"`
+}
+
+// 5.1.8 Implicit Object
+type Implicit struct {
+ // Required. The login endpoint definition.
+ LoginEndpoint LoginEndpoint `json:"loginEndpoint"`
+ // An optional alternative name to standard "access_token" OAuth2 parameter.
+ TokenName string `json:"tokenName"`
+}
+
+// 5.1.9 Authorization Code Object
+type AuthorizationCode struct {
+ TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"`
+ TokenEndpoint TokenEndpoint `json:"tokenEndpoint"`
+}
+
+// 5.1.10 Login Endpoint Object
+type LoginEndpoint struct {
+ // Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format.
+ Url string `json:"url"`
+}
+
+// 5.1.11 Token Request Endpoint Object
+type TokenRequestEndpoint struct {
+ // Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
+ Url string `json:"url"`
+ // An optional alternative name to standard "client_id" OAuth2 parameter.
+ ClientIdName string `json:"clientIdName"`
+ // An optional alternative name to the standard "client_secret" OAuth2 parameter.
+ ClientSecretName string `json:"clientSecretName"`
+}
+
+// 5.1.12 Token Endpoint Object
+type TokenEndpoint struct {
+ // Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
+ Url string `json:"url"`
+ // An optional alternative name to standard "access_token" OAuth2 parameter.
+ TokenName string `json:"tokenName"`
+}
+
+// 5.2 API Declaration
+type ApiDeclaration struct {
+ SwaggerVersion string `json:"swaggerVersion"`
+ ApiVersion string `json:"apiVersion"`
+ BasePath string `json:"basePath"`
+ ResourcePath string `json:"resourcePath"` // must start with /
+ Apis []Api `json:"apis,omitempty"`
+ Models ModelList `json:"models,omitempty"`
+ Produces []string `json:"produces,omitempty"`
+ Consumes []string `json:"consumes,omitempty"`
+ Authorizations []Authorization `json:"authorizations,omitempty"`
+}
+
+// 5.2.2 API Object
+type Api struct {
+ Path string `json:"path"` // relative or absolute, must start with /
+ Description string `json:"description"`
+ Operations []Operation `json:"operations,omitempty"`
+}
+
+// 5.2.3 Operation Object
+type Operation struct {
+ DataTypeFields
+ Method string `json:"method"`
+ Summary string `json:"summary,omitempty"`
+ Notes string `json:"notes,omitempty"`
+ Nickname string `json:"nickname"`
+ Authorizations []Authorization `json:"authorizations,omitempty"`
+ Parameters []Parameter `json:"parameters"`
+ ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional
+ Produces []string `json:"produces,omitempty"`
+ Consumes []string `json:"consumes,omitempty"`
+ Deprecated string `json:"deprecated,omitempty"`
+}
+
+// 5.2.4 Parameter Object
+type Parameter struct {
+ DataTypeFields
+ ParamType string `json:"paramType"` // path,query,body,header,form
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Required bool `json:"required"`
+ AllowMultiple bool `json:"allowMultiple"`
+}
+
+// 5.2.5 Response Message Object
+type ResponseMessage struct {
+ Code int `json:"code"`
+ Message string `json:"message"`
+ ResponseModel string `json:"responseModel,omitempty"`
+}
+
+// 5.2.6, 5.2.7 Models Object
+type Model struct {
+ Id string `json:"id"`
+ Description string `json:"description,omitempty"`
+ Required []string `json:"required,omitempty"`
+ Properties ModelPropertyList `json:"properties"`
+ SubTypes []string `json:"subTypes,omitempty"`
+ Discriminator string `json:"discriminator,omitempty"`
+}
+
+// 5.2.8 Properties Object
+type ModelProperty struct {
+ DataTypeFields
+ Description string `json:"description,omitempty"`
+}
+
+// 5.2.10
+type Authorizations map[string]Authorization
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go
new file mode 100644
index 0000000..05a3c7e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go
@@ -0,0 +1,21 @@
+package swagger
+
+type SwaggerBuilder struct {
+ SwaggerService
+}
+
+func NewSwaggerBuilder(config Config) *SwaggerBuilder {
+ return &SwaggerBuilder{*newSwaggerService(config)}
+}
+
+func (sb SwaggerBuilder) ProduceListing() ResourceListing {
+ return sb.SwaggerService.produceListing()
+}
+
+func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration {
+ return sb.SwaggerService.produceAllDeclarations()
+}
+
+func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) {
+ return sb.SwaggerService.produceDeclarations(route)
+}
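
SwaggerBuilder exposes the listing and declarations without serving them, which can be used, for example, to write the spec to files at build time. A sketch under that assumption (output file names are arbitrary):

    package main

    import (
        "encoding/json"
        "io/ioutil"
        "strings"

        "github.com/emicklei/go-restful"
        "github.com/emicklei/go-restful/swagger"
    )

    func main() {
        sb := swagger.NewSwaggerBuilder(swagger.Config{
            WebServices: restful.RegisteredWebServices(),
            ApiPath:     "/apidocs.json",
        })
        // resource listing
        listing, _ := json.MarshalIndent(sb.ProduceListing(), "", "  ")
        ioutil.WriteFile("api-docs.json", listing, 0644)
        // one file per ApiDeclaration
        for path, decl := range sb.ProduceAllDeclarations() {
            data, _ := json.MarshalIndent(decl, "", "  ")
            name := "api-docs" + strings.Replace(path, "/", "_", -1) + ".json"
            ioutil.WriteFile(name, data, 0644)
        }
    }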
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go
new file mode 100644
index 0000000..58dd625
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go
@@ -0,0 +1,440 @@
+package swagger
+
+import (
+ "fmt"
+
+ "github.com/emicklei/go-restful"
+ // "github.com/emicklei/hopwatch"
+ "net/http"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+type SwaggerService struct {
+ config Config
+ apiDeclarationMap *ApiDeclarationList
+}
+
+func newSwaggerService(config Config) *SwaggerService {
+ sws := &SwaggerService{
+ config: config,
+ apiDeclarationMap: new(ApiDeclarationList)}
+
+ // Build all ApiDeclarations
+ for _, each := range config.WebServices {
+ rootPath := each.RootPath()
+ // skip the api service itself
+ if rootPath != config.ApiPath {
+ if rootPath == "" || rootPath == "/" {
+ // use routes
+ for _, route := range each.Routes() {
+ entry := staticPathFromRoute(route)
+ _, exists := sws.apiDeclarationMap.At(entry)
+ if !exists {
+ sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry))
+ }
+ }
+ } else { // use root path
+ sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath()))
+ }
+ }
+ }
+
+ // if specified then call the PostBuilderHandler
+ if config.PostBuildHandler != nil {
+ config.PostBuildHandler(sws.apiDeclarationMap)
+ }
+ return sws
+}
+
+// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf
+var LogInfo = func(format string, v ...interface{}) {
+ // use the restful package-wide logger
+ log.Printf(format, v...)
+}
+
+// InstallSwaggerService adds the WebService that provides the API documentation of all services,
+// conforming to the Swagger documentation specification (https://github.com/wordnik/swagger-core/wiki).
+func InstallSwaggerService(aSwaggerConfig Config) {
+ RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer)
+}
+
+// RegisterSwaggerService adds the WebService that provides the API documentation of all services,
+// conforming to the Swagger documentation specification (https://github.com/wordnik/swagger-core/wiki).
+func RegisterSwaggerService(config Config, wsContainer *restful.Container) {
+ sws := newSwaggerService(config)
+ ws := new(restful.WebService)
+ ws.Path(config.ApiPath)
+ ws.Produces(restful.MIME_JSON)
+ if config.DisableCORS {
+ ws.Filter(enableCORS)
+ }
+ ws.Route(ws.GET("/").To(sws.getListing))
+ ws.Route(ws.GET("/{a}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations))
+ LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath)
+ wsContainer.Add(ws)
+
+ // Check paths for UI serving
+ if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" {
+ swaggerPathSlash := config.SwaggerPath
+ // path must end with slash /
+ if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
+ LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
+ swaggerPathSlash += "/"
+ }
+
+ LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath)
+ wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath))))
+
+ //if we define a custom static handler use it
+ } else if config.StaticHandler != nil && config.SwaggerPath != "" {
+ swaggerPathSlash := config.SwaggerPath
+ // path must end with slash /
+ if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
+ LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
+ swaggerPathSlash += "/"
+
+ }
+ LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler)
+ wsContainer.Handle(swaggerPathSlash, config.StaticHandler)
+
+ } else {
+ LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served")
+ }
+}
+
+func staticPathFromRoute(r restful.Route) string {
+ static := r.Path
+ bracket := strings.Index(static, "{")
+ if bracket <= 1 { // result cannot be empty
+ return static
+ }
+ if bracket != -1 {
+ static = r.Path[:bracket]
+ }
+ if strings.HasSuffix(static, "/") {
+ return static[:len(static)-1]
+ } else {
+ return static
+ }
+}
+
+func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
+ if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" {
+ // prevent duplicate header
+ if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 {
+ resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin)
+ }
+ }
+ chain.ProcessFilter(req, resp)
+}
+
+func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) {
+ listing := sws.produceListing()
+ resp.WriteAsJson(listing)
+}
+
+func (sws SwaggerService) produceListing() ResourceListing {
+ listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion, Info: sws.config.Info}
+ sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
+ ref := Resource{Path: k}
+ if len(v.Apis) > 0 { // use description of first (could still be empty)
+ ref.Description = v.Apis[0].Description
+ }
+ listing.Apis = append(listing.Apis, ref)
+ })
+ return listing
+}
+
+func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) {
+ decl, ok := sws.produceDeclarations(composeRootPath(req))
+ if !ok {
+ resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found")
+ return
+ }
+ // unless WebServicesUrl is given
+ if len(sws.config.WebServicesUrl) == 0 {
+ // update base path from the actual request
+ // TODO how to detect https? assume http for now
+ var host string
+ // X-Forwarded-Host or Host or Request.Host
+ hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific?
+ if !ok || len(hostvalues) == 0 {
+ forwarded, ok := req.Request.Header["Host"] // without reverse-proxy
+ if !ok || len(forwarded) == 0 {
+ // fallback to Host field
+ host = req.Request.Host
+ } else {
+ host = forwarded[0]
+ }
+ } else {
+ host = hostvalues[0]
+ }
+ // inspect Referer for the scheme (http vs https)
+ scheme := "http"
+ if referer := req.Request.Header["Referer"]; len(referer) > 0 {
+ if strings.HasPrefix(referer[0], "https") {
+ scheme = "https"
+ }
+ }
+ decl.BasePath = fmt.Sprintf("%s://%s", scheme, host)
+ }
+ resp.WriteAsJson(decl)
+}
+
+func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration {
+ decls := map[string]ApiDeclaration{}
+ sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
+ decls[k] = v
+ })
+ return decls
+}
+
+func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) {
+ decl, ok := sws.apiDeclarationMap.At(route)
+ if !ok {
+ return nil, false
+ }
+ decl.BasePath = sws.config.WebServicesUrl
+ return &decl, true
+}
+
+// composeDeclaration uses all routes and parameters to create an ApiDeclaration
+func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration {
+ decl := ApiDeclaration{
+ SwaggerVersion: swaggerVersion,
+ BasePath: sws.config.WebServicesUrl,
+ ResourcePath: pathPrefix,
+ Models: ModelList{},
+ ApiVersion: ws.Version()}
+
+ // collect any path parameters
+ rootParams := []Parameter{}
+ for _, param := range ws.PathParameters() {
+ rootParams = append(rootParams, asSwaggerParameter(param.Data()))
+ }
+ // aggregate by path
+ pathToRoutes := newOrderedRouteMap()
+ for _, other := range ws.Routes() {
+ if strings.HasPrefix(other.Path, pathPrefix) {
+ pathToRoutes.Add(other.Path, other)
+ }
+ }
+ pathToRoutes.Do(func(path string, routes []restful.Route) {
+ api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()}
+ voidString := "void"
+ for _, route := range routes {
+ operation := Operation{
+ Method: route.Method,
+ Summary: route.Doc,
+ Notes: route.Notes,
+ // Type gets overwritten if there is a write sample
+ DataTypeFields: DataTypeFields{Type: &voidString},
+ Parameters: []Parameter{},
+ Nickname: route.Operation,
+ ResponseMessages: composeResponseMessages(route, &decl, &sws.config)}
+
+ operation.Consumes = route.Consumes
+ operation.Produces = route.Produces
+
+ // share root params if any
+ for _, swparam := range rootParams {
+ operation.Parameters = append(operation.Parameters, swparam)
+ }
+ // route specific params
+ for _, param := range route.ParameterDocs {
+ operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data()))
+ }
+
+ sws.addModelsFromRouteTo(&operation, route, &decl)
+ api.Operations = append(api.Operations, operation)
+ }
+ decl.Apis = append(decl.Apis, api)
+ })
+ return decl
+}
+
+func withoutWildcard(path string) string {
+ if strings.HasSuffix(path, ":*}") {
+ return path[0:len(path)-3] + "}"
+ }
+ return path
+}
+
+// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them.
+func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *Config) (messages []ResponseMessage) {
+ if route.ResponseErrors == nil {
+ return messages
+ }
+ // sort by code
+ codes := sort.IntSlice{}
+ for code, _ := range route.ResponseErrors {
+ codes = append(codes, code)
+ }
+ codes.Sort()
+ for _, code := range codes {
+ each := route.ResponseErrors[code]
+ message := ResponseMessage{
+ Code: code,
+ Message: each.Message,
+ }
+ if each.Model != nil {
+ st := reflect.TypeOf(each.Model)
+ isCollection, st := detectCollectionType(st)
+ modelName := modelBuilder{}.keyFrom(st)
+ if isCollection {
+ modelName = "array[" + modelName + "]"
+ }
+ modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
+ // reference the model
+ message.ResponseModel = modelName
+ }
+ messages = append(messages, message)
+ }
+ return
+}
+
+// addModelsFromRouteTo takes any read or write sample from the Route and creates a Swagger model from it.
+func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) {
+ if route.ReadSample != nil {
+ sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models)
+ }
+ if route.WriteSample != nil {
+ sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models)
+ }
+}
+
+func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
+ isCollection := false
+ if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
+ st = st.Elem()
+ isCollection = true
+ } else {
+ if st.Kind() == reflect.Ptr {
+ if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array {
+ st = st.Elem().Elem()
+ isCollection = true
+ }
+ }
+ }
+ return isCollection, st
+}
+
+// addModelFromSampleTo creates and adds (or overwrites) a Model from a sample resource
+func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
+ if isResponse {
+ type_, items := asDataType(sample, &sws.config)
+ operation.Type = type_
+ operation.Items = items
+ }
+ modelBuilder{Models: models, Config: &sws.config}.addModelFrom(sample)
+}
+
+func asSwaggerParameter(param restful.ParameterData) Parameter {
+ return Parameter{
+ DataTypeFields: DataTypeFields{
+ Type: &param.DataType,
+ Format: asFormat(param.DataType, param.DataFormat),
+ DefaultValue: Special(param.DefaultValue),
+ },
+ Name: param.Name,
+ Description: param.Description,
+ ParamType: asParamType(param.Kind),
+
+ Required: param.Required}
+}
+
+// Between 1 and 7 path parameters are supported.
+func composeRootPath(req *restful.Request) string {
+ path := "/" + req.PathParameter("a")
+ b := req.PathParameter("b")
+ if b == "" {
+ return path
+ }
+ path = path + "/" + b
+ c := req.PathParameter("c")
+ if c == "" {
+ return path
+ }
+ path = path + "/" + c
+ d := req.PathParameter("d")
+ if d == "" {
+ return path
+ }
+ path = path + "/" + d
+ e := req.PathParameter("e")
+ if e == "" {
+ return path
+ }
+ path = path + "/" + e
+ f := req.PathParameter("f")
+ if f == "" {
+ return path
+ }
+ path = path + "/" + f
+ g := req.PathParameter("g")
+ if g == "" {
+ return path
+ }
+ return path + "/" + g
+}
+
+func asFormat(dataType string, dataFormat string) string {
+ if dataFormat != "" {
+ return dataFormat
+ }
+ return "" // TODO
+}
+
+func asParamType(kind int) string {
+ switch {
+ case kind == restful.PathParameterKind:
+ return "path"
+ case kind == restful.QueryParameterKind:
+ return "query"
+ case kind == restful.BodyParameterKind:
+ return "body"
+ case kind == restful.HeaderParameterKind:
+ return "header"
+ case kind == restful.FormParameterKind:
+ return "form"
+ }
+ return ""
+}
+
+func asDataType(any interface{}, config *Config) (*string, *Item) {
+ // If it's not a collection, return the suggested model name
+ st := reflect.TypeOf(any)
+ isCollection, st := detectCollectionType(st)
+ modelName := modelBuilder{}.keyFrom(st)
+ // if it's not a collection we are done
+ if !isCollection {
+ return &modelName, nil
+ }
+
+ // XXX: This is not very elegant
+ // We create an Item object referring to the given model
+ models := ModelList{}
+ mb := modelBuilder{Models: &models, Config: config}
+ mb.addModelFrom(any)
+
+ elemTypeName := mb.getElementTypeName(modelName, "", st)
+ item := new(Item)
+ if mb.isPrimitiveType(elemTypeName) {
+ mapped := mb.jsonSchemaType(elemTypeName)
+ item.Type = &mapped
+ } else {
+ item.Ref = &elemTypeName
+ }
+ tmp := "array"
+ return &tmp, item
+}
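
Putting it together on a dedicated container rather than the DefaultContainer; userWs and the UI folder location are hypothetical application specifics:

    container := restful.NewContainer()
    container.Add(userWs) // userWs is a hypothetical *restful.WebService

    config := swagger.Config{
        WebServices:     container.RegisteredWebServices(),
        WebServicesUrl:  "http://localhost:8080",
        ApiPath:         "/apidocs.json",
        SwaggerPath:     "/apidocs/",
        SwaggerFilePath: "/opt/swagger-ui/dist", // assumed location of the UI files
    }
    swagger.RegisterSwaggerService(config, container)

    http.ListenAndServe(":8080", container)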
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/web_service.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/web_service.go
new file mode 100644
index 0000000..24fc532
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/web_service.go
@@ -0,0 +1,268 @@
+package restful
+
+import (
+ "fmt"
+ "os"
+ "sync"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// WebService holds a collection of Route values that bind a Http Method + URL Path to a function.
+type WebService struct {
+ rootPath string
+ pathExpr *pathExpression // cached compilation of rootPath as RegExp
+ routes []Route
+ produces []string
+ consumes []string
+ pathParameters []*Parameter
+ filters []FilterFunction
+ documentation string
+ apiVersion string
+
+ dynamicRoutes bool
+
+ // protects 'routes' if dynamic routes are enabled
+ routesLock sync.RWMutex
+}
+
+func (w *WebService) SetDynamicRoutes(enable bool) {
+ w.dynamicRoutes = enable
+}
+
+// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
+func (w *WebService) compilePathExpression() {
+ compiled, err := newPathExpression(w.rootPath)
+ if err != nil {
+ log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err)
+ os.Exit(1)
+ }
+ w.pathExpr = compiled
+}
+
+// ApiVersion sets the API version for documentation purposes.
+func (w *WebService) ApiVersion(apiVersion string) *WebService {
+ w.apiVersion = apiVersion
+ return w
+}
+
+// Version returns the API version for documentation purposes.
+func (w WebService) Version() string { return w.apiVersion }
+
+// Path specifies the root URL template path of the WebService.
+// All Routes will be relative to this path.
+func (w *WebService) Path(root string) *WebService {
+ w.rootPath = root
+ if len(w.rootPath) == 0 {
+ w.rootPath = "/"
+ }
+ w.compilePathExpression()
+ return w
+}
+
+// Param adds a PathParameter to document parameters used in the root path.
+func (w *WebService) Param(parameter *Parameter) *WebService {
+ if w.pathParameters == nil {
+ w.pathParameters = []*Parameter{}
+ }
+ w.pathParameters = append(w.pathParameters, parameter)
+ return w
+}
+
+// PathParameter creates a new Parameter of kind Path for documentation purposes.
+// It is initialized as required with string as its DataType.
+func (w *WebService) PathParameter(name, description string) *Parameter {
+ return PathParameter(name, description)
+}
+
+// PathParameter creates a new Parameter of kind Path for documentation purposes.
+// It is initialized as required with string as its DataType.
+func PathParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}}
+ p.bePath()
+ return p
+}
+
+// QueryParameter creates a new Parameter of kind Query for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func (w *WebService) QueryParameter(name, description string) *Parameter {
+ return QueryParameter(name, description)
+}
+
+// QueryParameter creates a new Parameter of kind Query for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func QueryParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+ p.beQuery()
+ return p
+}
+
+// BodyParameter creates a new Parameter of kind Body for documentation purposes.
+// It is initialized as required without a DataType.
+func (w *WebService) BodyParameter(name, description string) *Parameter {
+ return BodyParameter(name, description)
+}
+
+// BodyParameter creates a new Parameter of kind Body for documentation purposes.
+// It is initialized as required without a DataType.
+func BodyParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}}
+ p.beBody()
+ return p
+}
+
+// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func (w *WebService) HeaderParameter(name, description string) *Parameter {
+ return HeaderParameter(name, description)
+}
+
+// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func HeaderParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+ p.beHeader()
+ return p
+}
+
+// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
+// It is initialized as required with string as its DataType.
+func (w *WebService) FormParameter(name, description string) *Parameter {
+ return FormParameter(name, description)
+}
+
+// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
+// It is initialized as required with string as its DataType.
+func FormParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+ p.beForm()
+ return p
+}
+
+// Route creates a new Route using the RouteBuilder and adds it to the ordered list of Routes.
+func (w *WebService) Route(builder *RouteBuilder) *WebService {
+ w.routesLock.Lock()
+ defer w.routesLock.Unlock()
+ builder.copyDefaults(w.produces, w.consumes)
+ w.routes = append(w.routes, builder.Build())
+ return w
+}
+
+// RemoveRoute removes the route that matches the given 'path' and 'method' from the WebService.
+func (w *WebService) RemoveRoute(path, method string) error {
+ if !w.dynamicRoutes {
+ return fmt.Errorf("dynamic routes are not enabled.")
+ }
+ w.routesLock.Lock()
+ defer w.routesLock.Unlock()
+ // Rebuild the route list without the matching route; using append avoids
+ // an out-of-range write when no route matches.
+ newRoutes := make([]Route, 0, len(w.routes))
+ for ix := range w.routes {
+ if w.routes[ix].Method == method && w.routes[ix].Path == path {
+ continue
+ }
+ newRoutes = append(newRoutes, w.routes[ix])
+ }
+ w.routes = newRoutes
+ return nil
+}
+
+// Method creates a new RouteBuilder and initializes its HTTP method
+func (w *WebService) Method(httpMethod string) *RouteBuilder {
+ return new(RouteBuilder).servicePath(w.rootPath).Method(httpMethod)
+}
+
+// Produces specifies that this WebService can produce one or more MIME types.
+// HTTP requests must have one of these values set for the Accept header.
+func (w *WebService) Produces(contentTypes ...string) *WebService {
+ w.produces = contentTypes
+ return w
+}
+
+// Consumes specifies that this WebService can consume one or more MIME types.
+// HTTP requests must have one of these values set for the Content-Type header.
+func (w *WebService) Consumes(accepts ...string) *WebService {
+ w.consumes = accepts
+ return w
+}
+
+// Routes returns the Routes associated with this WebService
+func (w WebService) Routes() []Route {
+ if !w.dynamicRoutes {
+ return w.routes
+ }
+ // Make a copy of the array to prevent concurrency problems
+ w.routesLock.RLock()
+ defer w.routesLock.RUnlock()
+ result := make([]Route, len(w.routes))
+ for ix := range w.routes {
+ result[ix] = w.routes[ix]
+ }
+ return result
+}
+
+// RootPath returns the RootPath associated with this WebService. Default "/"
+func (w WebService) RootPath() string {
+ return w.rootPath
+}
+
+// PathParameters returns the path parameters (shared among its Routes) of this WebService.
+func (w WebService) PathParameters() []*Parameter {
+ return w.pathParameters
+}
+
+// Filter adds a filter function to the chain of filters applicable to all its Routes
+func (w *WebService) Filter(filter FilterFunction) *WebService {
+ w.filters = append(w.filters, filter)
+ return w
+}
+
+// Doc is used to set the documentation of this service.
+func (w *WebService) Doc(plainText string) *WebService {
+ w.documentation = plainText
+ return w
+}
+
+// Documentation returns the documentation of this service.
+func (w WebService) Documentation() string {
+ return w.documentation
+}
+
+/*
+ Convenience methods
+*/
+
+// HEAD is a shortcut for .Method("HEAD").Path(subPath)
+func (w *WebService) HEAD(subPath string) *RouteBuilder {
+ return new(RouteBuilder).servicePath(w.rootPath).Method("HEAD").Path(subPath)
+}
+
+// GET is a shortcut for .Method("GET").Path(subPath)
+func (w *WebService) GET(subPath string) *RouteBuilder {
+ return new(RouteBuilder).servicePath(w.rootPath).Method("GET").Path(subPath)
+}
+
+// POST is a shortcut for .Method("POST").Path(subPath)
+func (w *WebService) POST(subPath string) *RouteBuilder {
+ return new(RouteBuilder).servicePath(w.rootPath).Method("POST").Path(subPath)
+}
+
+// PUT is a shortcut for .Method("PUT").Path(subPath)
+func (w *WebService) PUT(subPath string) *RouteBuilder {
+ return new(RouteBuilder).servicePath(w.rootPath).Method("PUT").Path(subPath)
+}
+
+// PATCH is a shortcut for .Method("PATCH").Path(subPath)
+func (w *WebService) PATCH(subPath string) *RouteBuilder {
+ return new(RouteBuilder).servicePath(w.rootPath).Method("PATCH").Path(subPath)
+}
+
+// DELETE is a shortcut for .Method("DELETE").Path(subPath)
+func (w *WebService) DELETE(subPath string) *RouteBuilder {
+ return new(RouteBuilder).servicePath(w.rootPath).Method("DELETE").Path(subPath)
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/web_service_container.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/web_service_container.go
new file mode 100644
index 0000000..c9d31b0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/web_service_container.go
@@ -0,0 +1,39 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "net/http"
+)
+
+// DefaultContainer is a restful.Container that uses http.DefaultServeMux
+var DefaultContainer *Container
+
+func init() {
+ DefaultContainer = NewContainer()
+ DefaultContainer.ServeMux = http.DefaultServeMux
+}
+
+// DoNotRecover controls whether panics are caught and converted into HTTP 500 responses.
+// If set to true, panics will not be caught; in that case, Route functions are responsible for handling any error situation.
+// The default value is false (recover from panics), which has performance implications.
+// OBSOLETE; use restful.DefaultContainer.DoNotRecover(true) instead.
+var DoNotRecover = false
+
+// Add registers a new WebService and adds it to the DefaultContainer.
+func Add(service *WebService) {
+ DefaultContainer.Add(service)
+}
+
+// Filter appends a container FilterFunction to the DefaultContainer.
+// These are called before dispatching a http.Request to a WebService.
+func Filter(filter FilterFunction) {
+ DefaultContainer.Filter(filter)
+}
+
+// RegisteredWebServices returns the collection of WebServices from the DefaultContainer.
+func RegisteredWebServices() []*WebService {
+ return DefaultContainer.RegisteredWebServices()
+}
diff --git a/src/kube2msb/vendor/github.com/ghodss/yaml/LICENSE b/src/kube2msb/vendor/github.com/ghodss/yaml/LICENSE
new file mode 100644
index 0000000..7805d36
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ghodss/yaml/LICENSE
@@ -0,0 +1,50 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sam Ghods
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/kube2msb/vendor/github.com/ghodss/yaml/README.md b/src/kube2msb/vendor/github.com/ghodss/yaml/README.md
new file mode 100644
index 0000000..f8f7e36
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ghodss/yaml/README.md
@@ -0,0 +1,116 @@
+# YAML marshaling and unmarshaling support for Go
+
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+
+## Compatibility
+
+This package uses [go-yaml v2](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
+
+```
+BAD:
+ exampleKey: !!binary gIGC
+
+GOOD:
+ exampleKey: gIGC
+... and decode the base64 data in your code.
+```
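+
+For the decoding step mentioned above, a minimal sketch might use custom JSON methods on a hypothetical `Image` type (not part of this library) so that both YAML and JSON input go through the same base64 handling:
+
+```go
+import (
+ "encoding/base64"
+ "encoding/json"
+)
+
+// Image is a hypothetical type whose Data field is stored as a plain base64
+// string in YAML/JSON (no !!binary tag) and decoded in UnmarshalJSON.
+type Image struct {
+ Data []byte
+}
+
+func (i *Image) UnmarshalJSON(b []byte) error {
+ // Read the JSON string, then decode its base64 payload.
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ data, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ return err
+ }
+ i.Data = data
+ return nil
+}
+
+func (i Image) MarshalJSON() ([]byte, error) {
+ // Encode the binary data back to a base64 JSON string.
+ return json.Marshal(base64.StdEncoding.EncodeToString(i.Data))
+}
+```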
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps whose keys are themselves maps will result in an error, since such keys are not supported by JSON. The same error occurs in `Unmarshal`, because map keys cannot be unmarshaled anyway: struct fields cannot serve as keys.
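+
+For illustration, here is a minimal (hypothetical) YAML document that triggers this error; `yaml.YAMLToJSON` returns an error for it because the key is itself a map:
+
+```
+# The "?" syntax introduces a complex key; here the key is a map.
+? {kind: Pod}
+: some-value
+```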
+
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+type Person struct {
+ Name string `json:"name"` // Affects YAML field names too.
+ Age int `json:"age"`
+}
+
+func main() {
+ // Marshal a Person struct to YAML.
+ p := Person{"John", 30}
+ y, err := yaml.Marshal(p)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ name: John
+ age: 30
+ */
+
+ // Unmarshal the YAML back into a Person struct.
+ var p2 Person
+ err = yaml.Unmarshal(y, &p2)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(p2)
+ /* Output:
+ {John 30}
+ */
+}
+```
+
+The `yaml.YAMLToJSON` and `yaml.JSONToYAML` functions are also available:
+
+```go
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+func main() {
+ j := []byte(`{"name": "John", "age": 30}`)
+ y, err := yaml.JSONToYAML(j)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ name: John
+ age: 30
+ */
+ j2, err := yaml.YAMLToJSON(y)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(j2))
+ /* Output:
+ {"age":30,"name":"John"}
+ */
+}
+```
diff --git a/src/kube2msb/vendor/github.com/ghodss/yaml/fields.go b/src/kube2msb/vendor/github.com/ghodss/yaml/fields.go
new file mode 100644
index 0000000..0bd3c2b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ghodss/yaml/fields.go
@@ -0,0 +1,497 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package yaml
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(json.Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+ equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ f.equalFold = foldFunc(f.nameBytes)
+ return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: opts.Contains("string"),
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ kelvin = '\u212a'
+ smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range s {
+ if b >= utf8.RuneSelf {
+ return bytes.EqualFold
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // See above for why these letters are special.
+ special = true
+ }
+ }
+ if special {
+ return equalFoldRight
+ }
+ if nonLetter {
+ return asciiEqualFold
+ }
+ return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ }
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
diff --git a/src/kube2msb/vendor/github.com/ghodss/yaml/yaml.go b/src/kube2msb/vendor/github.com/ghodss/yaml/yaml.go
new file mode 100644
index 0000000..c02beac
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ghodss/yaml/yaml.go
@@ -0,0 +1,277 @@
+package yaml
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "gopkg.in/yaml.v2"
+)
+
+// Marshals the object into JSON then converts JSON to YAML and returns the
+// YAML.
+func Marshal(o interface{}) ([]byte, error) {
+ j, err := json.Marshal(o)
+ if err != nil {
+ return nil, fmt.Errorf("error marshaling into JSON: ", err)
+ }
+
+ y, err := JSONToYAML(j)
+ if err != nil {
+ return nil, fmt.Errorf("error converting JSON to YAML: ", err)
+ }
+
+ return y, nil
+}
+
+// Converts YAML to JSON then uses JSON to unmarshal into an object.
+func Unmarshal(y []byte, o interface{}) error {
+ vo := reflect.ValueOf(o)
+ j, err := yamlToJSON(y, &vo)
+ if err != nil {
+ return fmt.Errorf("error converting YAML to JSON: %v", err)
+ }
+
+ err = json.Unmarshal(j, o)
+ if err != nil {
+ return fmt.Errorf("error unmarshaling JSON: %v", err)
+ }
+
+ return nil
+}
+
+// Convert JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+ // Convert the JSON to an object.
+ var jsonObj interface{}
+ // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+ // Go JSON library doesn't try to pick the right number type (int, float,
+ // etc.) when unmarshaling to interface{}, it just picks float64
+ // universally. go-yaml does go through the effort of picking the right
+ // number type, so we can preserve number type throughout this process.
+ err := yaml.Unmarshal(j, &jsonObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // Marshal this object into YAML.
+ return yaml.Marshal(jsonObj)
+}
+
+// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
+// this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+// in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+// use binary data with this library, encode the data as base64 as usual but do
+// not use the !!binary tag in your YAML. This will ensure the original base64
+// encoded data makes it all the way through to the JSON.
+func YAMLToJSON(y []byte) ([]byte, error) {
+ return yamlToJSON(y, nil)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+ // Convert the YAML to an object.
+ var yamlObj interface{}
+ err := yaml.Unmarshal(y, &yamlObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // YAML objects are not completely compatible with JSON objects (e.g. you
+ // can have non-string keys in YAML). So, convert the YAML-compatible object
+ // to a JSON-compatible object, failing with an error if irrecoverable
+ // incompatibilities happen along the way.
+ jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert this object to JSON and return the data.
+ return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+ var err error
+
+ // Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+ // interface). We pass decodingNull as false because we're not actually
+ // decoding into the value, we're just checking if the ultimate target is a
+ // string.
+ if jsonTarget != nil {
+ ju, tu, pv := indirect(*jsonTarget, false)
+ // We have a JSON or Text Unmarshaler at this level, so we can't be trying
+ // to decode into a string.
+ if ju != nil || tu != nil {
+ jsonTarget = nil
+ } else {
+ jsonTarget = &pv
+ }
+ }
+
+ // If yamlObj is a number or a boolean, check if jsonTarget is a string -
+ // if so, coerce. Else return normal.
+ // If yamlObj is a map or array, find the field that each key is
+ // unmarshaling to, and when you recurse pass the reflect.Value for that
+ // field back into this function.
+ switch typedYAMLObj := yamlObj.(type) {
+ case map[interface{}]interface{}:
+ // JSON does not support arbitrary keys in a map, so we must convert
+ // these keys to strings.
+ //
+ // From my reading of go-yaml v2 (specifically the resolve function),
+ // keys can only have the types string, int, int64, float64, binary
+ // (unsupported), or null (unsupported).
+ strMap := make(map[string]interface{})
+ for k, v := range typedYAMLObj {
+ // Resolve the key to a string first.
+ var keyString string
+ switch typedKey := k.(type) {
+ case string:
+ keyString = typedKey
+ case int:
+ keyString = strconv.Itoa(typedKey)
+ case int64:
+ // go-yaml will only return an int64 as a key if the system
+ // architecture is 32-bit and the key's value is between 32-bit
+ // and 64-bit. Otherwise the key type will simply be int.
+ keyString = strconv.FormatInt(typedKey, 10)
+ case float64:
+ // Stolen from go-yaml to use the same conversion to string as
+ // the go-yaml library uses to convert float to string when
+ // Marshaling.
+ s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ keyString = s
+ case bool:
+ if typedKey {
+ keyString = "true"
+ } else {
+ keyString = "false"
+ }
+ default:
+ return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+ reflect.TypeOf(k), k, v)
+ }
+
+ // jsonTarget should be a struct or a map. If it's a struct, find
+ // the field it's going to map to and pass its reflect.Value. If
+ // it's a map, find the element type of the map and pass the
+ // reflect.Value created from that type. If it's neither, just pass
+ // nil - JSON conversion will error for us if it's a real issue.
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Struct {
+ keyBytes := []byte(keyString)
+ // Find the field that the JSON library would use.
+ var f *field
+ fields := cachedTypeFields(t.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, keyBytes) {
+ f = ff
+ break
+ }
+ // Do case-insensitive comparison.
+ if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+ f = ff
+ }
+ }
+ if f != nil {
+ // Find the reflect.Value of the most preferential
+ // struct field.
+ jtf := t.Field(f.index[0])
+ strMap[keyString], err = convertToJSONableObject(v, &jtf)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ } else if t.Kind() == reflect.Map {
+ // Create a zero value of the map's element type to use as
+ // the JSON target.
+ jtv := reflect.Zero(t.Type().Elem())
+ strMap[keyString], err = convertToJSONableObject(v, &jtv)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ }
+ strMap[keyString], err = convertToJSONableObject(v, nil)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return strMap, nil
+ case []interface{}:
+ // We need to recurse into arrays in case there are any
+ // map[interface{}]interface{}'s inside and to convert any
+ // numbers to strings.
+
+ // If jsonTarget is a slice (which it really should be), find the
+ // thing it's going to map to. If it's not a slice, just pass nil
+ // - JSON conversion will error for us if it's a real issue.
+ var jsonSliceElemValue *reflect.Value
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Slice {
+ // By default slices point to nil, but we need a reflect.Value
+ // pointing to a value of the slice type, so we create one here.
+ ev := reflect.Indirect(reflect.New(t.Type().Elem()))
+ jsonSliceElemValue = &ev
+ }
+ }
+
+ // Make and use a new array.
+ arr := make([]interface{}, len(typedYAMLObj))
+ for i, v := range typedYAMLObj {
+ arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arr, nil
+ default:
+ // If the target type is a string and the YAML type is a number,
+ // convert the YAML type to a string.
+ if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
+ // Based on my reading of go-yaml, it may return int, int64,
+ // float64, or uint64.
+ var s string
+ switch typedVal := typedYAMLObj.(type) {
+ case int:
+ s = strconv.FormatInt(int64(typedVal), 10)
+ case int64:
+ s = strconv.FormatInt(typedVal, 10)
+ case float64:
+ s = strconv.FormatFloat(typedVal, 'g', -1, 32)
+ case uint64:
+ s = strconv.FormatUint(typedVal, 10)
+ case bool:
+ if typedVal {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ }
+ if len(s) > 0 {
+ yamlObj = interface{}(s)
+ }
+ }
+ return yamlObj, nil
+ }
+
+ return nil, nil
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/LICENSE b/src/kube2msb/vendor/github.com/gogo/protobuf/LICENSE
new file mode 100644
index 0000000..335e38e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/LICENSE
@@ -0,0 +1,36 @@
+Extensions for Protocol Buffers to create more go like structures.
+
+Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+http://github.com/gogo/protobuf/gogoproto
+
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/Makefile b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/Makefile
new file mode 100644
index 0000000..23a6b17
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc-min-version --version="3.0.0" --proto_path=.:../../../../ --gogo_out=. proto3_proto/proto3.proto
+ make
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/clone.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/clone.go
new file mode 100644
index 0000000..79edb86
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/clone.go
@@ -0,0 +1,228 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := in.Addr().Interface().(extensionsMap); ok {
+ emOut := out.Addr().Interface().(extensionsMap)
+ mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
+ } else if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
+ emOut := out.Addr().Interface().(extensionsBytes)
+ bIn := emIn.GetExtensions()
+ bOut := emOut.GetExtensions()
+ *bOut = append(*bOut, *bIn...)
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode.go
new file mode 100644
index 0000000..cb5b213
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode.go
@@ -0,0 +1,872 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ // x, n already 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ // x, err already 0
+
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // todo: check if can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ if ee, eok := e.(extensionsMap); eok {
+ ext := ee.ExtensionMap()[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ ee.ExtensionMap()[int32(tag)] = ext
+ } else if ee, eok := e.(extensionsBytes); eok {
+ ext := ee.GetExtensions()
+ *ext = append(*ext, o.buf[oi:o.index]...)
+ }
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders.
+// For each, u is the decoded value and v is a pointer to the field
+// (itself a pointer) in the struct.
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // tagcode for key and value properties are always a single byte
+ // because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() || !valelem.IsValid() {
+ // We did not decode the key or the value in the map entry.
+ // Either way, it's an invalid map entry.
+ return fmt.Errorf("proto: bad map data: missing key/val")
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode_gogo.go
new file mode 100644
index 0000000..6a77aad
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode_gogo.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "reflect"
+)
+
+// Decode a reference to a struct pointer.
+func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ panic("not supported, since this is a pointer receiver")
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ bas := structPointer_FieldPointer(base, p.field)
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of references to struct pointers ([]struct).
+func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error {
+ newBas := appendStructPointer(base, p.field, p.sstype)
+
+ if is_group {
+ panic("not supported, maybe in future, if requested.")
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ panic("not supported, since this is not a pointer receiver.")
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, newBas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of references to struct pointers.
+func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_ref_struct(p, false, base)
+}
+
+func setPtrCustomType(base structPointer, f field, v interface{}) {
+ if v == nil {
+ return
+ }
+ structPointer_SetStructPointer(base, f, structPointer(reflect.ValueOf(v).Pointer()))
+}
+
+func setCustomType(base structPointer, f field, value interface{}) {
+ if value == nil {
+ return
+ }
+ v := reflect.ValueOf(value).Elem()
+ t := reflect.TypeOf(value).Elem()
+ kind := t.Kind()
+ switch kind {
+ case reflect.Slice:
+ slice := reflect.MakeSlice(t, v.Len(), v.Cap())
+ reflect.Copy(slice, v)
+ oldHeader := structPointer_GetSliceHeader(base, f)
+ oldHeader.Data = slice.Pointer()
+ oldHeader.Len = v.Len()
+ oldHeader.Cap = v.Cap()
+ default:
+ l := 1
+ size := reflect.TypeOf(value).Elem().Size()
+ if kind == reflect.Array {
+ l = reflect.TypeOf(value).Elem().Len()
+ size = reflect.TypeOf(value).Size()
+ }
+ total := int(size) * l
+ structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), total)
+ }
+}
+
+func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ i := reflect.New(p.ctype.Elem()).Interface()
+ custom := (i).(Unmarshaler)
+ if err := custom.Unmarshal(b); err != nil {
+ return err
+ }
+ setPtrCustomType(base, p.field, custom)
+ return nil
+}
+
+func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ i := reflect.New(p.ctype).Interface()
+ custom := (i).(Unmarshaler)
+ if err := custom.Unmarshal(b); err != nil {
+ return err
+ }
+ if custom != nil {
+ setCustomType(base, p.field, custom)
+ }
+ return nil
+}
+
+// Decode a slice of bytes ([]byte) into a slice of custom types.
+func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ i := reflect.New(p.ctype.Elem()).Interface()
+ custom := (i).(Unmarshaler)
+ if err := custom.Unmarshal(b); err != nil {
+ return err
+ }
+ newBas := appendStructPointer(base, p.field, p.ctype)
+
+ setCustomType(newBas, 0, custom)
+
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode.go
new file mode 100644
index 0000000..7321e1a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode.go
@@ -0,0 +1,1335 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
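+
+// For example, EncodeVarint(300) returns the bytes 0xac, 0x02: 300 is
+// 0b1_0010_1100, so the low seven bits are emitted first with the
+// continuation bit set (0xac), followed by the remaining bits 0b10 (0x02).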
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
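+
+// For example, EncodeFixed64(1) appends the little-endian bytes
+// 01 00 00 00 00 00 00 00 to the buffer.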
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
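+
+// Zigzag encoding maps signed values of small magnitude to small unsigned
+// varints: 0, -1, 1, -2, 2 encode as 0, 1, 2, 3, 4 respectively.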
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
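+
+// For example, EncodeStringBytes("abc") appends 0x03 'a' 'b' 'c': the length
+// as a varint followed by the raw bytes. EncodeRawBytes lays out a []byte the
+// same way.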
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ var state errorState
+ if err != nil && !state.shouldContinue(err, nil) {
+ return nil, err
+ }
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
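+
+// Round-trip sketch (illustrative only; MyMessage is a hypothetical generated
+// type whose pointer implements Message):
+//
+//	data, err := Marshal(&MyMessage{})
+//	if err != nil {
+//		// For proto2 this may be a *RequiredNotSetError, in which case
+//		// data still holds a partial encoding.
+//	}
+//	var out MyMessage
+//	err = Unmarshal(data, &out)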
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ if err != nil {
+ return err
+ }
+ p.buf = append(p.buf, data...)
+ return nil
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Encode++
+ }
+
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Size++
+ }
+
+ return
+}
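+
+// Size(pb) is intended to match len(data) for data, _ := Marshal(pb) when pb
+// encodes without error.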
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
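+
+// For example, a packed repeated int32 field with tag 4 holding the values
+// 3, 270 and 86942 is encoded as the bytes
+//
+//	22 06 03 8e 02 9e a7 05
+//
+// where 0x22 is the key (field 4, WireBytes), 0x06 is the payload length and
+// the remaining bytes are the three varints concatenated.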
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ v := *structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionMap(v); err != nil {
+ return err
+ }
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := *structPointer_ExtMap(base, p.field)
+ return sizeExtensionMap(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
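+
+ // For example, with map<string, int32> m = 3; a single entry "a" -> 1 is
+ // encoded as 1a 05 0a 01 61 10 01: the outer key (field 3, WireBytes) and
+ // length 5, followed by the entry's key field (0a 01 'a') and value field
+ // (10 01).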
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ // The only illegal map entry values are nil message pointers.
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ return errors.New("proto: map has nil element")
+ }
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns new addressable reflect.Values for the map's key and
+// value types, along with structPointers suitable for passing to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) //
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ // TODO: This could be faster and use less reflection.
+ if prop.oneofMarshaler != nil {
+ sv := reflect.ValueOf(structPointer_Interface(base, prop.stype)).Elem()
+ for i := 0; i < prop.stype.NumField(); i++ {
+ fv := sv.Field(i)
+ if fv.Kind() != reflect.Interface || fv.IsNil() {
+ continue
+ }
+ if prop.stype.Field(i).Tag.Get("protobuf_oneof") == "" {
+ continue
+ }
+ spv := fv.Elem() // interface -> *T
+ sv := spv.Elem() // *T -> T
+ sf := sv.Type().Field(0) // StructField inside T
+ var prop Properties
+ prop.Init(sf.Type, "whatever", sf.Tag.Get("protobuf"), &sf)
+ n += prop.size(&prop, toStructPointer(spv))
+ }
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
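+
+// For instance, if enc() writes a 3-byte message, its length varint needs one
+// byte, so x = 1-4 = -3 and the message is shifted three bytes left before the
+// length is written into the space reserved at iLen.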
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
new file mode 100644
index 0000000..f77cfb1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
@@ -0,0 +1,354 @@
+// Extensions for Protocol Buffers to create more Go-like structures.
+//
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// http://github.com/golang/protobuf/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "reflect"
+)
+
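+// NewRequiredNotSetError returns a RequiredNotSetError recording the given field name.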
+func NewRequiredNotSetError(field string) *RequiredNotSetError {
+ return &RequiredNotSetError{field}
+}
+
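+// Sizer is implemented by messages that can report the size of their own encoding.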
+type Sizer interface {
+ Size() int
+}
+
+func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, s...)
+ return nil
+}
+
+func size_ext_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return 0
+ }
+ n += len(s)
+ return
+}
+
+// Encode a reference to bool pointer.
+func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ x := 0
+ if v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_ref_bool(p *Properties, base structPointer) int {
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode a reference to int32 pointer.
+func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v))
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_ref_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v))
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_ref_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a reference to an int64 pointer.
+func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_ref_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a reference to a string pointer.
+func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_ref_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// Encode a reference to a message struct.
+func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetRefStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return nil
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+//TODO this is only copied, please fix this
+func size_ref_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetRefStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a slice of references to message struct pointers ([]struct).
+func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ ss := structPointer_GetStructPointer(base, p.field)
+ ss1 := structPointer_GetRefStructPointer(ss, field(0))
+ size := p.stype.Size()
+ l := structPointer_Len(base, p.field)
+ for i := 0; i < l; i++ {
+ structp := structPointer_Add(ss1, field(uintptr(i)*size))
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ }
+ return state.err
+}
+
+//TODO this is only copied, please fix this
+func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) {
+ ss := structPointer_GetStructPointer(base, p.field)
+ ss1 := structPointer_GetRefStructPointer(ss, field(0))
+ size := p.stype.Size()
+ l := structPointer_Len(base, p.field)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := structPointer_Add(ss1, field(uintptr(i)*size))
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error {
+ i := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if i == nil {
+ return ErrNil
+ }
+ custom := i.(Marshaler)
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ if data == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return nil
+}
+
+func size_custom_bytes(p *Properties, base structPointer) (n int) {
+ n += len(p.tagcode)
+ i := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if i == nil {
+ return 0
+ }
+ custom := i.(Marshaler)
+ data, _ := custom.Marshal()
+ n += sizeRawBytes(data)
+ return
+}
+
+func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error {
+ custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler)
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ if data == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return nil
+}
+
+func size_custom_ref_bytes(p *Properties, base structPointer) (n int) {
+ n += len(p.tagcode)
+ i := structPointer_InterfaceAt(base, p.field, p.ctype)
+ if i == nil {
+ return 0
+ }
+ custom := i.(Marshaler)
+ data, _ := custom.Marshal()
+ n += sizeRawBytes(data)
+ return
+}
+
+func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error {
+ inter := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if inter == nil {
+ return ErrNil
+ }
+ slice := reflect.ValueOf(inter)
+ l := slice.Len()
+ for i := 0; i < l; i++ {
+ v := slice.Index(i)
+ custom := v.Interface().(Marshaler)
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ }
+ return nil
+}
+
+func size_custom_slice_bytes(p *Properties, base structPointer) (n int) {
+ inter := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if inter == nil {
+ return 0
+ }
+ slice := reflect.ValueOf(inter)
+ l := slice.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ v := slice.Index(i)
+ custom := v.Interface().(Marshaler)
+ data, _ := custom.Marshal()
+ n += sizeRawBytes(data)
+ }
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/equal.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/equal.go
new file mode 100644
index 0000000..cc3f2c9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/equal.go
@@ -0,0 +1,266 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN.
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal (a "bytes" field,
+ although represented by []byte, is not a repeated field)
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
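+
+// Illustrative usage sketch: comparing two generated messages with Equal.
+// pb.Test and proto.String follow the package documentation example; the
+// names are assumptions, and any generated message type behaves the same.
+//
+//	a := &pb.Test{Label: proto.String("x")}
+//	b := &pb.Test{Label: proto.String("x")}
+//	if proto.Equal(a, b) {
+//		// set fields, unknown fields and extension sets all match
+//	}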
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+func equalAny(v1, v2 reflect.Value) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ return equalAny(v1.Elem(), v2.Elem())
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i)) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// em1 and em2 are extension maps.
+func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions.go
new file mode 100644
index 0000000..9a6374f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions.go
@@ -0,0 +1,519 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+}
+
+type extensionsMap interface {
+ extendableProto
+ ExtensionMap() map[int32]Extension
+}
+
+type extensionsBytes interface {
+ extendableProto
+ GetExtensions() *[]byte
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base extendableProto, id int32, b []byte) {
+ if ebase, ok := base.(extensionsMap); ok {
+ ebase.ExtensionMap()[id] = Extension{enc: b}
+ } else if ebase, ok := base.(extensionsBytes); ok {
+ clearExtension(base, id)
+ ext := ebase.GetExtensions()
+ *ext = append(*ext, b...)
+ } else {
+ panic("unreachable")
+ }
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ // Check the extended type.
+ if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionMap(m map[int32]Extension) error {
+ for k, e := range m {
+ err := encodeExtension(&e)
+ if err != nil {
+ return err
+ }
+ m[k] = e
+ }
+ return nil
+}
+
+func encodeExtension(e *Extension) error {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ return nil
+ }
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ return nil
+}
+
+func sizeExtensionMap(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ if epb, doki := pb.(extensionsMap); doki {
+ _, ok := epb.ExtensionMap()[extension.Field]
+ return ok
+ } else if epb, doki := pb.(extensionsBytes); doki {
+ ext := epb.GetExtensions()
+ buf := *ext
+ o := 0
+ for o < len(buf) {
+ tag, n := DecodeVarint(buf[o:])
+ fieldNum := int32(tag >> 3)
+ if int32(fieldNum) == extension.Field {
+ return true
+ }
+ wireType := int(tag & 0x7)
+ o += n
+ l, err := size(buf[o:], wireType)
+ if err != nil {
+ return false
+ }
+ o += l
+ }
+ return false
+ }
+ panic("unreachable")
+}
+
+func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
+ ext := pb.GetExtensions()
+ for offset < len(*ext) {
+ tag, n1 := DecodeVarint((*ext)[offset:])
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ n2, err := size((*ext)[offset+n1:], wireType)
+ if err != nil {
+ panic(err)
+ }
+ newOffset := offset + n1 + n2
+ if fieldNum == theFieldNum {
+ *ext = append((*ext)[:offset], (*ext)[newOffset:]...)
+ return offset
+ }
+ offset = newOffset
+ }
+ return -1
+}
+
+func clearExtension(pb extendableProto, fieldNum int32) {
+ if epb, doki := pb.(extensionsMap); doki {
+ delete(epb.ExtensionMap(), fieldNum)
+ } else if epb, doki := pb.(extensionsBytes); doki {
+ offset := 0
+ for offset != -1 {
+ offset = deleteExtension(epb, fieldNum, offset)
+ }
+ } else {
+ panic("unreachable")
+ }
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
+ // TODO: Check types, field numbers, etc.?
+ clearExtension(pb, extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present it returns ErrMissingExtension.
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return nil, err
+ }
+
+ if epb, doki := pb.(extensionsMap); doki {
+ emap := epb.ExtensionMap()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+ } else if epb, doki := pb.(extensionsBytes); doki {
+ ext := epb.GetExtensions()
+ o := 0
+ for o < len(*ext) {
+ tag, n := DecodeVarint((*ext)[o:])
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ l, err := size((*ext)[o+n:], wireType)
+ if err != nil {
+ return nil, err
+ }
+ if int32(fieldNum) == extension.Field {
+ v, err := decodeExtension((*ext)[o:o+n+l], extension)
+ if err != nil {
+ return nil, err
+ }
+ return v, nil
+ }
+ o += n + l
+ }
+ return defaultExtensionValue(extension)
+ }
+ panic("unreachable")
+}
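+
+// Illustrative sketch of a typical extension round trip, assuming a
+// hypothetical generated base message and extension descriptor
+// (pb.MyMessage, pb.E_MyExt) produced by the protocol compiler:
+//
+//	msg := &pb.MyMessage{}
+//	if err := proto.SetExtension(msg, pb.E_MyExt, proto.String("v")); err != nil {
+//		// handle err
+//	}
+//	v, err := proto.GetExtension(msg, pb.E_MyExt)
+//	if err == nil {
+//		_ = *(v.(*string)) // "v"
+//	}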
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr, we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non-int32 reflect.Value directly,
+ // set it as an int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+ rep := extension.repeated()
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate a "field" to store the pointer/slice itself; the
+ // pointer/slice will be stored here. We pass
+ // the address of this field to props.dec.
+ // This passes a zero field and a *t and lets props.dec
+ // interpret it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if !rep || o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := pb.(extendableProto)
+ if !ok {
+ err = errors.New("proto: not an extendable proto")
+ return
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+ return setExtension(pb, extension, value)
+}
+
+func setExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+ if epb, doki := pb.(extensionsMap); doki {
+ epb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+ } else if epb, doki := pb.(extensionsBytes); doki {
+ ClearExtension(pb, extension)
+ ext := epb.GetExtensions()
+ et := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+ p := NewBuffer(nil)
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ *ext = append(*ext, p.buf...)
+ }
+ return nil
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
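+
+// Illustrative sketch: generated code registers each extension descriptor
+// once in an init function, after which it can be looked up by field
+// number. E_MyExt and MyMessage are hypothetical generated names.
+//
+//	func init() { proto.RegisterExtension(E_MyExt) }
+//
+//	descs := proto.RegisteredExtensions((*MyMessage)(nil))
+//	desc := descs[E_MyExt.Field]
+//	_ = desc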
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
new file mode 100644
index 0000000..bd55fb6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
@@ -0,0 +1,221 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool {
+ if reflect.ValueOf(pb).IsNil() {
+ return ifnotset
+ }
+ value, err := GetExtension(pb, extension)
+ if err != nil {
+ return ifnotset
+ }
+ if value == nil {
+ return ifnotset
+ }
+ if value.(*bool) == nil {
+ return ifnotset
+ }
+ return *(value.(*bool))
+}
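+
+// Illustrative sketch: GetBoolExtension reads a boolean extension with a
+// fallback when the option is absent or unset. The descriptor E_SomeOption
+// and the options message opts are hypothetical placeholders.
+//
+//	enabled := proto.GetBoolExtension(opts, E_SomeOption, true)
+//	_ = enabled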
+
+func (this *Extension) Equal(that *Extension) bool {
+ return bytes.Equal(this.enc, that.enc)
+}
+
+func SizeOfExtensionMap(m map[int32]Extension) (n int) {
+ return sizeExtensionMap(m)
+}
+
+type sortableMapElem struct {
+ field int32
+ ext Extension
+}
+
+func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
+ s := make(sortableExtensions, 0, len(m))
+ for k, v := range m {
+ s = append(s, &sortableMapElem{field: k, ext: v})
+ }
+ return s
+}
+
+type sortableExtensions []*sortableMapElem
+
+func (this sortableExtensions) Len() int { return len(this) }
+
+func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
+
+func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
+
+func (this sortableExtensions) String() string {
+ sort.Sort(this)
+ ss := make([]string, len(this))
+ for i := range this {
+ ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
+ }
+ return "map[" + strings.Join(ss, ",") + "]"
+}
+
+func StringFromExtensionsMap(m map[int32]Extension) string {
+ return newSortableExtensionsFromMap(m).String()
+}
+
+func StringFromExtensionsBytes(ext []byte) string {
+ m, err := BytesToExtensionsMap(ext)
+ if err != nil {
+ panic(err)
+ }
+ return StringFromExtensionsMap(m)
+}
+
+func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
+ if err := encodeExtensionMap(m); err != nil {
+ return 0, err
+ }
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ for _, k := range keys {
+ n += copy(data[n:], m[int32(k)].enc)
+ }
+ return n, nil
+}
+
+func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
+ if m[id].value == nil || m[id].desc == nil {
+ return m[id].enc, nil
+ }
+ if err := encodeExtensionMap(m); err != nil {
+ return nil, err
+ }
+ return m[id].enc, nil
+}
+
+func size(buf []byte, wire int) (int, error) {
+ switch wire {
+ case WireVarint:
+ _, n := DecodeVarint(buf)
+ return n, nil
+ case WireFixed64:
+ return 8, nil
+ case WireBytes:
+ v, n := DecodeVarint(buf)
+ return int(v) + n, nil
+ case WireFixed32:
+ return 4, nil
+ case WireStartGroup:
+ offset := 0
+ for {
+ u, n := DecodeVarint(buf[offset:])
+ fwire := int(u & 0x7)
+ offset += n
+ if fwire == WireEndGroup {
+ return offset, nil
+ }
+ s, err := size(buf[offset:], fwire)
+ if err != nil {
+ return 0, err
+ }
+ offset += s
+ }
+ }
+ return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
+}
+
+func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
+ m := make(map[int32]Extension)
+ i := 0
+ for i < len(buf) {
+ tag, n := DecodeVarint(buf[i:])
+ if n <= 0 {
+ return nil, fmt.Errorf("unable to decode varint")
+ }
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ l, err := size(buf[i+n:], wireType)
+ if err != nil {
+ return nil, err
+ }
+ end := i + int(l) + n
+ m[int32(fieldNum)] = Extension{enc: buf[i:end]}
+ i = end
+ }
+ return m, nil
+}
+
+func NewExtension(e []byte) Extension {
+ ee := Extension{enc: make([]byte, len(e))}
+ copy(ee.enc, e)
+ return ee
+}
+
+func (this Extension) GoString() string {
+ if this.enc == nil {
+ if err := encodeExtension(&this); err != nil {
+ panic(err)
+ }
+ }
+ return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
+}
+
+func SetUnsafeExtension(pb extendableProto, fieldNum int32, value interface{}) error {
+ typ := reflect.TypeOf(pb).Elem()
+ ext, ok := extensionMaps[typ]
+ if !ok {
+ return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+ }
+ desc, ok := ext[fieldNum]
+ if !ok {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return setExtension(pb, desc, value)
+}
+
+func GetUnsafeExtension(pb extendableProto, fieldNum int32) (interface{}, error) {
+ typ := reflect.TypeOf(pb).Elem()
+ ext, ok := extensionMaps[typ]
+ if !ok {
+ return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+ }
+ desc, ok := ext[fieldNum]
+ if !ok {
+ return nil, fmt.Errorf("unregistered field number %d", fieldNum)
+ }
+ return GetExtension(pb, desc)
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib.go
new file mode 100644
index 0000000..8ffa91a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib.go
@@ -0,0 +1,883 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/gogo/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/gogo/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // write point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
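+
+// Illustrative sketch: reusing a single Buffer across many encode calls to
+// amortize allocations. Buffer.Marshal is defined in encode.go; msgs and
+// send are placeholders.
+//
+//	buf := proto.NewBuffer(nil)
+//	for _, msg := range msgs {
+//		buf.Reset()
+//		if err := buf.Marshal(msg); err != nil {
+//			// handle err
+//		}
+//		send(buf.Bytes())
+//	}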
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
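+
+// Illustrative sketch: SetDefaults is usually applied to a freshly
+// unmarshaled message so that unset optional fields with proto-declared
+// defaults can be read directly rather than through getters. pb.Test is
+// the hypothetical type from the package documentation.
+//
+//	var t pb.Test
+//	if err := proto.Unmarshal(data, &t); err != nil {
+//		// handle err
+//	}
+//	proto.SetDefaults(&t)
+//	// t.Type now points at 77 (the declared default) if it was unset.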
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to a slice of the fields,
+ // with its scalar fields set to their proto-declared non-zero default values.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field can not have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
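
For reference, a minimal standalone sketch of the deterministic map-key ordering that mapKeys/mapKeySorter implement above; the sample map and the use of sort.Slice are illustrative only, not part of the vendored file:

package main

import (
	"fmt"
	"reflect"
	"sort"
)

func main() {
	m := map[int32]string{10: "c", 2: "a", 7: "b"}
	keys := reflect.ValueOf(m).MapKeys()

	// Numeric keys sort numerically, mirroring the Int32/Int64 case above;
	// other key kinds would fall back to fmt.Sprint-based textual comparison.
	sort.Slice(keys, func(i, j int) bool { return keys[i].Int() < keys[j].Int() })

	for _, k := range keys {
		fmt.Println(k.Int(), m[int32(k.Int())]) // 2 a, then 7 b, then 10 c
	}
}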
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
new file mode 100644
index 0000000..a6c2c06
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) {
+ s, ok := m[value]
+ if !ok {
+ s = strconv.Itoa(int(value))
+ }
+ return json.Marshal(s)
+}
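
A short usage sketch of MarshalJSONEnum: known values marshal as their JSON-quoted name, unknown values fall back to the decimal number as a string. The enum name map below is hypothetical, standing in for a generated Xxx_name map:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	// Hypothetical generated name map for an enum with two values.
	names := map[int32]string{0: "UNKNOWN", 1: "ACTIVE"}

	b, _ := proto.MarshalJSONEnum(names, 1)
	fmt.Println(string(b)) // "ACTIVE"

	b, _ = proto.MarshalJSONEnum(names, 5)
	fmt.Println(string(b)) // "5" (no name registered, so the number is used)
}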
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/message_set.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/message_set.go
new file mode 100644
index 0000000..e25e01e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/message_set.go
@@ -0,0 +1,280 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ if ms.find(pb) != nil {
+ return true
+ }
+ return false
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
+ if err := encodeExtensionMap(m); err != nil {
+ return nil, err
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ if i > 0 {
+ b.WriteByte(',')
+ }
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
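
A worked sketch of the framing that skipVarint strips: each message-set item carries a tag varint and a length varint in front of the raw message bytes, so skipVarint(skipVarint(enc)) recovers the bare message, which is what MarshalMessageSet relies on. The field number and payload bytes below are illustrative:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// skipVarint mirrors the unexported helper above: drop one varint from the front.
func skipVarint(buf []byte) []byte {
	i := 0
	for ; buf[i]&0x80 != 0; i++ {
	}
	return buf[i+1:]
}

func main() {
	body := []byte{0xAA, 0xBB, 0xCC}
	enc := proto.EncodeVarint(uint64(12345)<<3 | proto.WireBytes) // tag varint
	enc = append(enc, proto.EncodeVarint(uint64(len(body)))...)   // length varint
	enc = append(enc, body...)

	fmt.Printf("% x\n", skipVarint(skipVarint(enc))) // aa bb cc
}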
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..749919d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,479 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(int64(x)))
+ case reflect.Uint64:
+ elem.SetUint(uint64(x))
+ case reflect.Float64:
+ elem.SetFloat(float64(math.Float64frombits(x)))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return uint64(elem.Uint())
+ case reflect.Float64:
+ return math.Float64bits(float64(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
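
In this reflect-based build, a field is the reflect index path and every accessor walks it with Elem().FieldByIndex on the struct pointer's value. A standalone sketch of that lookup, with an illustrative struct:

package main

import (
	"fmt"
	"reflect"
)

type example struct{ Count *int32 }

func main() {
	n := int32(7)
	m := &example{Count: &n}

	// toField keeps the StructField.Index slice...
	sf, _ := reflect.TypeOf(m).Elem().FieldByName("Count")

	// ...and structPointer_field dereferences the struct pointer and walks that path.
	v := reflect.ValueOf(m).Elem().FieldByIndex(sf.Index)
	fmt.Println(*v.Interface().(*int32)) // 7
}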
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..e9be0fe
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,266 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
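
In the unsafe build, a field is just its byte offset and every accessor is base address plus offset, cast to the right pointer type. A standalone sketch of the same arithmetic, with an illustrative struct:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type example struct {
	A int32
	B *string
}

func main() {
	s := "hello"
	m := &example{A: 1, B: &s}

	// toField records the byte offset of the field...
	sf, _ := reflect.TypeOf(m).Elem().FieldByName("B")

	// ...and accessors such as structPointer_String add it to the struct's address.
	bp := (**string)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + sf.Offset))
	fmt.Println(**bp) // hello
}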
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
new file mode 100644
index 0000000..6bc85fa
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} {
+ point := unsafe.Pointer(uintptr(p) + uintptr(f))
+ r := reflect.NewAt(t, point)
+ return r.Interface()
+}
+
+func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} {
+ point := unsafe.Pointer(uintptr(p) + uintptr(f))
+ r := reflect.NewAt(t, point)
+ if r.Elem().IsNil() {
+ return nil
+ }
+ return r.Elem().Interface()
+}
+
+func copyUintPtr(oldptr, newptr uintptr, size int) {
+ oldbytes := make([]byte, 0)
+ oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes))
+ oldslice.Data = oldptr
+ oldslice.Len = size
+ oldslice.Cap = size
+ newbytes := make([]byte, 0)
+ newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes))
+ newslice.Data = newptr
+ newslice.Len = size
+ newslice.Cap = size
+ copy(newbytes, oldbytes)
+}
+
+func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {
+ copyUintPtr(uintptr(oldptr), uintptr(newptr), size)
+}
+
+func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer {
+ size := typ.Elem().Size()
+ oldHeader := structPointer_GetSliceHeader(base, f)
+ newLen := oldHeader.Len + 1
+ slice := reflect.MakeSlice(typ, newLen, newLen)
+ bas := toStructPointer(slice)
+ for i := 0; i < oldHeader.Len; i++ {
+ newElemptr := uintptr(bas) + uintptr(i)*size
+ oldElemptr := oldHeader.Data + uintptr(i)*size
+ copyUintPtr(oldElemptr, newElemptr, int(size))
+ }
+
+ oldHeader.Data = uintptr(bas)
+ oldHeader.Len = newLen
+ oldHeader.Cap = newLen
+
+ return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size)))
+}
+
+func structPointer_FieldPointer(p structPointer, f field) structPointer {
+ return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+func structPointer_GetRefStructPointer(p structPointer, f field) structPointer {
+ return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader {
+ return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+func structPointer_Add(p structPointer, size field) structPointer {
+ return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size)))
+}
+
+func structPointer_Len(p structPointer, f field) int {
+ return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f))))
+}
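
copyUintPtr above moves size bytes between two raw addresses by aliasing each address as a []byte through reflect.SliceHeader and then using copy. A minimal sketch of that aliasing, with illustrative fixed-size arrays:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	src := [4]byte{1, 2, 3, 4}
	var dst [4]byte

	// Point two zero-length []byte headers at the raw addresses, then memmove with copy.
	var sb, db []byte
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&sb))
	sh.Data, sh.Len, sh.Cap = uintptr(unsafe.Pointer(&src)), len(src), len(src)
	dh := (*reflect.SliceHeader)(unsafe.Pointer(&db))
	dh.Data, dh.Len, dh.Cap = uintptr(unsafe.Pointer(&dst)), len(dst), len(dst)

	copy(db, sb)
	fmt.Println(dst) // [1 2 3 4]
}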
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties.go
new file mode 100644
index 0000000..4711057
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties.go
@@ -0,0 +1,915 @@
+// Extensions for Protocol Buffers to create more go like structures.
+//
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ CustomType string
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sstype reflect.Type // set for slices of structs types only
+ ctype reflect.Type // set for custom types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s = ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ if p.OrigName != p.Name {
+ s += ",name=" + p.OrigName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ case strings.HasPrefix(f, "embedded="):
+ p.OrigName = strings.Split(f, "=")[1]
+ case strings.HasPrefix(f, "customtype="):
+ p.CustomType = strings.Split(f, "=")[1]
+ }
+ }
+}
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+ if len(p.CustomType) > 0 {
+ p.setCustomEncAndDec(typ)
+ p.setTag(lockGetProp)
+ return
+ }
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ } else {
+ p.enc = (*Buffer).enc_ref_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_ref_bool
+ }
+ case reflect.Int32:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ } else {
+ p.enc = (*Buffer).enc_ref_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_ref_int32
+ }
+ case reflect.Uint32:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ } else {
+ p.enc = (*Buffer).enc_ref_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_ref_uint32
+ }
+ case reflect.Int64, reflect.Uint64:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ } else {
+ p.enc = (*Buffer).enc_ref_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_ref_int64
+ }
+ case reflect.Float32:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ } else {
+ p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_ref_uint32
+ }
+ case reflect.Float64:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ } else {
+ p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_ref_int64
+ }
+ case reflect.String:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+ } else {
+ p.enc = (*Buffer).enc_ref_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_ref_string
+ }
+ case reflect.Struct:
+ p.stype = typ
+ p.isMarshaler = isMarshaler(typ)
+ p.isUnmarshaler = isUnmarshaler(typ)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_ref_struct_message
+ p.dec = (*Buffer).dec_ref_struct_message
+ p.size = size_ref_struct_message
+ } else {
+ fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ)
+ }
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_byte
+ p.dec = (*Buffer).dec_slice_byte
+ p.size = size_slice_byte
+ // This is a []byte, which is either a bytes field,
+ // or the value of a map field. In the latter case,
+ // we always encode an empty []byte, so we should not
+ // use the proto3 enc/size funcs.
+ // f == nil iff this is the key/value of a map field.
+ if p.proto3 && f != nil {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ case reflect.Struct:
+ p.setSliceOfNonPointerStructs(t1)
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+ p.setTag(lockGetProp)
+}
+
+func (p *Properties) setTag(lockGetProp bool) {
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ isOneofMessage := false
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_extensions" { // special case
+ if len(f.Tag.Get("protobuf")) > 0 {
+ p.enc = (*Buffer).enc_ext_slice_byte
+ p.dec = nil // not needed
+ p.size = size_ext_slice_byte
+ } else {
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ }
+ }
+ if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") != "" // special case
+ if oneof {
+ isOneofMessage = true
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+var enumStringMaps = make(map[string]map[int32]string)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+ if _, ok := enumStringMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumStringMaps[typeName] = unusedNameMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
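The registry functions above (RegisterType, RegisterEnum, MessageName, MessageType, EnumValueMap) are normally called from code emitted by protoc-gen-gogo. A minimal, hypothetical sketch of that usage, assuming the package is imported by its upstream path github.com/gogo/protobuf/proto and using made-up demo.* names:

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"
    )

    // Example stands in for a generated message type; only the three
    // proto.Message methods are needed for registration.
    type Example struct{}

    func (m *Example) Reset()         { *m = Example{} }
    func (m *Example) String() string { return proto.CompactTextString(m) }
    func (*Example) ProtoMessage()    {}

    func main() {
        // Generated code performs these registrations from init functions.
        proto.RegisterType((*Example)(nil), "demo.Example")
        proto.RegisterEnum("demo.Color",
            map[int32]string{0: "RED", 1: "BLUE"},
            map[string]int32{"RED": 0, "BLUE": 1})

        fmt.Println(proto.MessageName(&Example{}))            // demo.Example
        fmt.Println(proto.MessageType("demo.Example"))        // *main.Example
        fmt.Println(proto.EnumValueMap("demo.Color")["BLUE"]) // 1
    }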
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
new file mode 100644
index 0000000..8daf9f7
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+)
+
+func (p *Properties) setCustomEncAndDec(typ reflect.Type) {
+ p.ctype = typ
+ if p.Repeated {
+ p.enc = (*Buffer).enc_custom_slice_bytes
+ p.dec = (*Buffer).dec_custom_slice_bytes
+ p.size = size_custom_slice_bytes
+ } else if typ.Kind() == reflect.Ptr {
+ p.enc = (*Buffer).enc_custom_bytes
+ p.dec = (*Buffer).dec_custom_bytes
+ p.size = size_custom_bytes
+ } else {
+ p.enc = (*Buffer).enc_custom_ref_bytes
+ p.dec = (*Buffer).dec_custom_ref_bytes
+ p.size = size_custom_ref_bytes
+ }
+}
+
+func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) {
+ t2 := typ.Elem()
+ p.sstype = typ
+ p.stype = t2
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ p.enc = (*Buffer).enc_slice_ref_struct_message
+ p.dec = (*Buffer).dec_slice_ref_struct_message
+ p.size = size_slice_ref_struct_message
+ if p.Wire != "bytes" {
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2)
+ }
+}
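setCustomEncAndDec above routes fields carrying a customtype tag entry through the package's Marshaler and Unmarshaler interfaces instead of the stock coders; writeAny and readAny later in this diff call Marshal and Unmarshal on such values directly. A rough sketch of one such custom scalar (the UUID type is hypothetical, and real gogoproto custom types usually also provide Size and MarshalTo for the binary fast path, which is omitted here):

    package demo

    import "fmt"

    // UUID is a hypothetical custom scalar referenced by a customtype tag.
    type UUID [16]byte

    // Marshal satisfies the Marshaler interface: the raw bytes go on the wire.
    func (u UUID) Marshal() ([]byte, error) { return u[:], nil }

    // Unmarshal satisfies the Unmarshaler interface; the text parser calls it
    // with the unquoted token bytes, the decoder with the wire bytes.
    func (u *UUID) Unmarshal(data []byte) error {
        if len(data) != len(u) {
            return fmt.Errorf("UUID: got %d bytes, want %d", len(data), len(u))
        }
        copy(u[:], data)
        return nil
    }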
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
new file mode 100644
index 0000000..4fe7e08
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
@@ -0,0 +1,117 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "io"
+)
+
+func Skip(data []byte) (n int, err error) {
+ l := len(data)
+ index := 0
+ for index < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ index++
+ if data[index-1] < 0x80 {
+ break
+ }
+ }
+ return index, nil
+ case 1:
+ index += 8
+ return index, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ index += length
+ return index, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = index
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := Skip(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ index = start + next
+ }
+ return index, nil
+ case 4:
+ return index, nil
+ case 5:
+ index += 4
+ return index, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
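Skip returns the number of bytes occupied by the first tag/value pair in the buffer, which lets callers step over fields they do not understand. A small sketch, assuming the package is imported by its upstream path:

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"
    )

    func main() {
        data := []byte{
            0x08, 0x96, 0x01, // field 1, wire type 0 (varint), value 150
            0x12, 0x03, 'a', 'b', 'c', // field 2, wire type 2, length 3
        }
        n, err := proto.Skip(data)
        if err != nil {
            panic(err)
        }
        fmt.Println(n)               // 3: the varint field occupies 3 bytes
        fmt.Printf("%q\n", data[n:]) // "\x12\x03abc": the next field
    }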
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text.go
new file mode 100644
index 0000000..7c9ae90
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text.go
@@ -0,0 +1,793 @@
+// Extensions for Protocol Buffers to create more go like structures.
+//
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Printf("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func writeStruct(w *textWriter, sv reflect.Value) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("<nil>\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if len(props.Enum) > 0 {
+ if err := writeEnum(w, v, props); err != nil {
+ return err
+ }
+ } else if err := writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props.Parse(tag) // Overwrite the outer props.
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ if len(props.Enum) > 0 {
+ if err := writeEnum(w, fv, props); err != nil {
+ return err
+ }
+ } else if err := writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv
+ if pv.CanAddr() {
+ pv = sv.Addr()
+ } else {
+ pv = reflect.New(sv.Type())
+ pv.Elem().Set(sv)
+ }
+ if pv.Type().Implements(extendableProtoType) {
+ if err := writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ if props != nil && len(props.CustomType) > 0 {
+ custom, ok := v.Interface().(Marshaler)
+ if ok {
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ if err := writeString(w, string(data)); err != nil {
+ return err
+ }
+ return nil
+ }
+ }
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, ferr := fmt.Fprintf(w, "/* %v */\n", err)
+ return ferr
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, werr := w.Write(endBraceNewline); werr != nil {
+ return werr
+ }
+ continue
+ }
+ if _, ferr := fmt.Fprint(w, tag); ferr != nil {
+ return ferr
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep := pv.Interface().(extendableProto)
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ var m map[int32]Extension
+ if em, ok := ep.(extensionsMap); ok {
+ m = em.ExtensionMap()
+ } else if em, ok := ep.(extensionsBytes); ok {
+ eb := em.GetExtensions()
+ var err error
+ m, err = BytesToExtensionsMap(*eb)
+ if err != nil {
+ return err
+ }
+ }
+
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+func marshalText(w io.Writer, pb Message, compact bool) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte("<nil>"))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: compact,
+ }
+
+ if tm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error {
+ return marshalText(w, pb, false)
+}
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, false)
+ return buf.String()
+}
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, true)
+ return buf.String()
+}
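For orientation, a minimal sketch of the exported text helpers above, using a hand-written stand-in for a generated message (the Greeting type, its tags, and the field names are hypothetical; real code would use protoc-gen-gogo output):

    package main

    import (
        "fmt"
        "os"

        "github.com/gogo/protobuf/proto"
    )

    // Greeting stands in for a generated message; the protobuf struct tags are
    // what GetProperties and the text writer key off.
    type Greeting struct {
        Name  *string `protobuf:"bytes,1,opt,name=name"`
        Count *int32  `protobuf:"varint,2,opt,name=count"`
    }

    func (m *Greeting) Reset()         { *m = Greeting{} }
    func (m *Greeting) String() string { return proto.CompactTextString(m) }
    func (*Greeting) ProtoMessage()    {}

    func main() {
        name, count := "world", int32(3)
        m := &Greeting{Name: &name, Count: &count}

        // Multi-line form: one "name: value" pair per line.
        proto.MarshalText(os.Stdout, m)

        // Compact form: the same pairs on a single line.
        fmt.Println(proto.CompactTextString(m))
    }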
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_gogo.go
new file mode 100644
index 0000000..cdb2337
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_gogo.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+)
+
+func writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
+ m, ok := enumStringMaps[props.Enum]
+ if !ok {
+ // Enum not registered: write the raw value once and return, rather
+ // than falling through and printing it a second time below.
+ return writeAny(w, v, props)
+ }
+ key := int32(0)
+ if v.Kind() == reflect.Ptr {
+ key = int32(v.Elem().Int())
+ } else {
+ key = int32(v.Int())
+ }
+ s, ok := m[key]
+ if !ok {
+ // Unknown enum value: fall back to the numeric form and return, so
+ // the empty lookup result is not printed afterwards.
+ return writeAny(w, v, props)
+ }
+ _, err := fmt.Fprint(w, s)
+ return err
+}
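writeEnum only fires for fields whose tag carries an enum=<name> entry that was registered via RegisterEnum; otherwise values fall back to the numeric rendering done by writeAny. A hypothetical sketch of the resulting text output:

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"
    )

    type Color int32

    // Msg stands in for a generated message with a non-nullable enum field.
    type Msg struct {
        Color Color `protobuf:"varint,1,opt,name=color,enum=demo.Color"`
    }

    func (m *Msg) Reset()         { *m = Msg{} }
    func (m *Msg) String() string { return proto.CompactTextString(m) }
    func (*Msg) ProtoMessage()    {}

    func main() {
        proto.RegisterEnum("demo.Color",
            map[int32]string{0: "RED", 1: "BLUE"},
            map[string]int32{"RED": 0, "BLUE": 1})

        fmt.Print(proto.MarshalTextString(&Msg{Color: 1})) // color: BLUE
    }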
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_parser.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..f390969
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -0,0 +1,841 @@
+// Extensions for Protocol Buffers to create more go like structures.
+//
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || p.s[0] != '"' {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]".
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == tok.value {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", tok.value)
+ }
+ // Check the extension terminator.
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != "]" {
+ return p.errorf("unrecognized extension terminator %q", tok.value)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(extendableProto)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ sv.Field(oop.Field).Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // Technically the "key" and "value" could come in any order,
+ // but in practice they won't.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ if err := p.consumeToken("key"); err != nil {
+ return err
+ }
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken("value"); err != nil {
+ return err
+ }
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken(terminator); err != nil {
+ return err
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ } else if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+ if len(props.CustomType) > 0 {
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ tc := reflect.TypeOf(new(Marshaler))
+ ok := t.Elem().Implements(tc.Elem())
+ if ok {
+ fv := v
+ flen := fv.Len()
+ if flen == fv.Cap() {
+ nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+ reflect.Copy(nav, fv)
+ fv.Set(nav)
+ }
+ fv.SetLen(flen + 1)
+
+ // Read one.
+ p.back()
+ return p.readAny(fv.Index(flen), props)
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.ValueOf(custom))
+ } else {
+ custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+ }
+ return nil
+ }
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // Either "true", "false", 1 or 0.
+ switch tok.value {
+ case "true", "1":
+ fv.SetBool(true)
+ return nil
+ case "false", "0":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
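
For orientation, here is a minimal, hedged sketch of how the text-format parser above is typically driven through UnmarshalText. The import path github.com/golang/protobuf/proto and the hand-written Demo message are assumptions for illustration only; real code would use protoc-generated message types.

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto" // assumed import path for the vendored proto package
    )

    // Demo is a hypothetical stand-in for a generated message; the protobuf
    // struct tags are what the text parser uses to resolve field names.
    type Demo struct {
    	Name  string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
    	Count int32  `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
    }

    func (m *Demo) Reset()         { *m = Demo{} }
    func (m *Demo) String() string { return proto.CompactTextString(m) }
    func (*Demo) ProtoMessage()    {}

    func main() {
    	msg := &Demo{}
    	// UnmarshalText resets msg and parses the text-format input; a missing
    	// required field would surface as *RequiredNotSetError.
    	if err := proto.UnmarshalText(`name: "kube2msb" count: 3`, msg); err != nil {
    		fmt.Println("parse error:", err)
    		return
    	}
    	fmt.Println(msg.Name, msg.Count)
    }
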
diff --git a/src/kube2msb/vendor/github.com/golang/glog/LICENSE b/src/kube2msb/vendor/github.com/golang/glog/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/glog/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/golang/glog/README b/src/kube2msb/vendor/github.com/golang/glog/README
new file mode 100644
index 0000000..5f9c114
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/glog/README
@@ -0,0 +1,44 @@
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+ http://code.google.com/p/google-glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from glog.go introduces the ideas:
+
+ Package glog implements logging analogous to the Google-internal
+ C++ INFO/ERROR/V setup. It provides functions Info, Warning,
+ Error, Fatal, plus formatting variants such as Infof. It
+ also provides V-style logging controlled by the -v and
+ -vmodule=file=2 flags.
+
+ Basic examples:
+
+ glog.Info("Prepare to repel boarders")
+
+ glog.Fatalf("Initialization failed: %s", err)
+
+ See the documentation for the V function for an explanation
+ of these examples:
+
+ if glog.V(2) {
+ glog.Info("Starting transaction...")
+ }
+
+ glog.V(2).Infoln("Processed", nItems, "elements")
+
+
+The repository contains an open source version of the log package
+used inside Google. The master copy of the source lives inside
+Google, not here. The code in this repo is for export only and is not itself
+under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.
diff --git a/src/kube2msb/vendor/github.com/golang/glog/glog.go b/src/kube2msb/vendor/github.com/golang/glog/glog.go
new file mode 100644
index 0000000..3e63fff
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/glog/glog.go
@@ -0,0 +1,1177 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
+// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
+// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
+//
+// Basic examples:
+//
+// glog.Info("Prepare to repel boarders")
+//
+// glog.Fatalf("Initialization failed: %s", err)
+//
+// See the documentation for the V function for an explanation of these examples:
+//
+// if glog.V(2) {
+// glog.Info("Starting transaction...")
+// }
+//
+// glog.V(2).Infoln("Processed", nItems, "elements")
+//
+// Log output is buffered and written periodically using Flush. Programs
+// should call Flush before exiting to guarantee all log output is written.
+//
+// By default, all log statements write to files in a temporary directory.
+// This package provides several flags that modify this behavior.
+// As a result, flag.Parse must be called before any logging is done.
+//
+// -logtostderr=false
+// Logs are written to standard error instead of to files.
+// -alsologtostderr=false
+// Logs are written to standard error as well as to files.
+// -stderrthreshold=ERROR
+// Log events at or above this severity are logged to standard
+// error as well as to files.
+// -log_dir=""
+// Log files will be written to this directory instead of the
+// default temporary directory.
+//
+// Other flags provide aids to debugging.
+//
+// -log_backtrace_at=""
+// When set to a file and line number holding a logging statement,
+// such as
+// -log_backtrace_at=gopherflakes.go:234
+// a stack trace will be written to the Info log whenever execution
+// hits that statement. (Unlike with -vmodule, the ".go" must be
+// present.)
+// -v=0
+// Enable V-leveled logging at the specified level.
+// -vmodule=""
+// The syntax of the argument is a comma-separated list of pattern=N,
+// where pattern is a literal file name (minus the ".go" suffix) or
+// "glob" pattern and N is a V level. For instance,
+// -vmodule=gopher*=3
+// sets the V level to 3 in all Go files whose names begin "gopher".
+//
+package glog
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ stdLog "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// severity identifies the sort of log: info, warning etc. It also implements
+// the flag.Value interface. The -stderrthreshold flag is of type severity and
+// should be modified only through the flag.Value interface. The values match
+// the corresponding constants in C++.
+type severity int32 // sync/atomic int32
+
+// These constants identify the log levels in order of increasing severity.
+// A message written to a high-severity log file is also written to each
+// lower-severity log file.
+const (
+ infoLog severity = iota
+ warningLog
+ errorLog
+ fatalLog
+ numSeverity = 4
+)
+
+const severityChar = "IWEF"
+
+var severityName = []string{
+ infoLog: "INFO",
+ warningLog: "WARNING",
+ errorLog: "ERROR",
+ fatalLog: "FATAL",
+}
+
+// get returns the value of the severity.
+func (s *severity) get() severity {
+ return severity(atomic.LoadInt32((*int32)(s)))
+}
+
+// set sets the value of the severity.
+func (s *severity) set(val severity) {
+ atomic.StoreInt32((*int32)(s), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (s *severity) String() string {
+ return strconv.FormatInt(int64(*s), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (s *severity) Get() interface{} {
+ return *s
+}
+
+// Set is part of the flag.Value interface.
+func (s *severity) Set(value string) error {
+ var threshold severity
+ // Is it a known name?
+ if v, ok := severityByName(value); ok {
+ threshold = v
+ } else {
+ v, err := strconv.Atoi(value)
+ if err != nil {
+ return err
+ }
+ threshold = severity(v)
+ }
+ logging.stderrThreshold.set(threshold)
+ return nil
+}
+
+func severityByName(s string) (severity, bool) {
+ s = strings.ToUpper(s)
+ for i, name := range severityName {
+ if name == s {
+ return severity(i), true
+ }
+ }
+ return 0, false
+}
+
+// OutputStats tracks the number of output lines and bytes written.
+type OutputStats struct {
+ lines int64
+ bytes int64
+}
+
+// Lines returns the number of lines written.
+func (s *OutputStats) Lines() int64 {
+ return atomic.LoadInt64(&s.lines)
+}
+
+// Bytes returns the number of bytes written.
+func (s *OutputStats) Bytes() int64 {
+ return atomic.LoadInt64(&s.bytes)
+}
+
+// Stats tracks the number of lines of output and number of bytes
+// per severity level. Values must be read with atomic.LoadInt64.
+var Stats struct {
+ Info, Warning, Error OutputStats
+}
+
+var severityStats = [numSeverity]*OutputStats{
+ infoLog: &Stats.Info,
+ warningLog: &Stats.Warning,
+ errorLog: &Stats.Error,
+}
+
+// Level is exported because it appears in the arguments to V and is
+// the type of the v flag, which can be set programmatically.
+// It's a distinct type because we want to discriminate it from logType.
+// Variables of type level are only changed under logging.mu.
+// The -v flag is read only with atomic ops, so the state of the logging
+// module is consistent.
+
+// Level is treated as a sync/atomic int32.
+
+// Level specifies a level of verbosity for V logs. *Level implements
+// flag.Value; the -v flag is of type Level and should be modified
+// only through the flag.Value interface.
+type Level int32
+
+// get returns the value of the Level.
+func (l *Level) get() Level {
+ return Level(atomic.LoadInt32((*int32)(l)))
+}
+
+// set sets the value of the Level.
+func (l *Level) set(val Level) {
+ atomic.StoreInt32((*int32)(l), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (l *Level) String() string {
+ return strconv.FormatInt(int64(*l), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (l *Level) Get() interface{} {
+ return *l
+}
+
+// Set is part of the flag.Value interface.
+func (l *Level) Set(value string) error {
+ v, err := strconv.Atoi(value)
+ if err != nil {
+ return err
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ logging.setVState(Level(v), logging.vmodule.filter, false)
+ return nil
+}
+
+// moduleSpec represents the setting of the -vmodule flag.
+type moduleSpec struct {
+ filter []modulePat
+}
+
+// modulePat contains a filter for the -vmodule flag.
+// It holds a verbosity level and a file pattern to match.
+type modulePat struct {
+ pattern string
+ literal bool // The pattern is a literal string
+ level Level
+}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+ if m.literal {
+ return file == m.pattern
+ }
+ match, _ := filepath.Match(m.pattern, file)
+ return match
+}
+
+func (m *moduleSpec) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ var b bytes.Buffer
+ for i, f := range m.filter {
+ if i > 0 {
+ b.WriteRune(',')
+ }
+ fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+ }
+ return b.String()
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (m *moduleSpec) Get() interface{} {
+ return nil
+}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+ var filter []modulePat
+ for _, pat := range strings.Split(value, ",") {
+ if len(pat) == 0 {
+ // Empty strings such as from a trailing comma can be ignored.
+ continue
+ }
+ patLev := strings.Split(pat, "=")
+ if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+ return errVmoduleSyntax
+ }
+ pattern := patLev[0]
+ v, err := strconv.Atoi(patLev[1])
+ if err != nil {
+ return errors.New("syntax error: expect comma-separated list of filename=N")
+ }
+ if v < 0 {
+ return errors.New("negative value for vmodule level")
+ }
+ if v == 0 {
+ continue // Ignore. It's harmless but no point in paying the overhead.
+ }
+ // TODO: check syntax of filter?
+ filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ logging.setVState(logging.verbosity, filter, true)
+ return nil
+}
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+ return !strings.ContainsAny(pattern, `\*?[]`)
+}
+
+// traceLocation represents the setting of the -log_backtrace_at flag.
+type traceLocation struct {
+ file string
+ line int
+}
+
+// isSet reports whether the trace location has been specified.
+// logging.mu is held.
+func (t *traceLocation) isSet() bool {
+ return t.line > 0
+}
+
+// match reports whether the specified file and line matches the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+ if t.line != line {
+ return false
+ }
+ if i := strings.LastIndex(file, "/"); i >= 0 {
+ file = file[i+1:]
+ }
+ return t.file == file
+}
+
+func (t *traceLocation) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (t *traceLocation) Get() interface{} {
+ return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+ if value == "" {
+ // Unset.
+ t.line = 0
+ t.file = ""
+ }
+ fields := strings.Split(value, ":")
+ if len(fields) != 2 {
+ return errTraceSyntax
+ }
+ file, line := fields[0], fields[1]
+ if !strings.Contains(file, ".") {
+ return errTraceSyntax
+ }
+ v, err := strconv.Atoi(line)
+ if err != nil {
+ return errTraceSyntax
+ }
+ if v <= 0 {
+ return errors.New("negative or zero value for level")
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ t.line = v
+ t.file = file
+ return nil
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+ Flush() error
+ Sync() error
+ io.Writer
+}
+
+func init() {
+ flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
+ flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
+ flag.Var(&logging.verbosity, "v", "log level for V logs")
+ flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
+ flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+ flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+
+ // Default stderrThreshold is ERROR.
+ logging.stderrThreshold = errorLog
+
+ logging.setVState(0, nil, false)
+ go logging.flushDaemon()
+}
+
+// Flush flushes all pending log I/O.
+func Flush() {
+ logging.lockAndFlushAll()
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+ // Boolean flags. Not handled atomically because the flag.Value interface
+ // does not let us avoid the =true, and that shorthand is necessary for
+ // compatibility. TODO: does this matter enough to fix? Seems unlikely.
+ toStderr bool // The -logtostderr flag.
+ alsoToStderr bool // The -alsologtostderr flag.
+
+ // Level flag. Handled atomically.
+ stderrThreshold severity // The -stderrthreshold flag.
+
+ // freeList is a list of byte buffers, maintained under freeListMu.
+ freeList *buffer
+ // freeListMu maintains the free list. It is separate from the main mutex
+ // so buffers can be grabbed and printed to without holding the main lock,
+ // for better parallelization.
+ freeListMu sync.Mutex
+
+ // mu protects the remaining elements of this structure and is
+ // used to synchronize logging.
+ mu sync.Mutex
+ // file holds writer for each of the log types.
+ file [numSeverity]flushSyncWriter
+ // pcs is used in V to avoid an allocation when computing the caller's PC.
+ pcs [1]uintptr
+ // vmap is a cache of the V Level for each V() call site, identified by PC.
+ // It is wiped whenever the vmodule flag changes state.
+ vmap map[uintptr]Level
+ // filterLength stores the length of the vmodule filter chain. If greater
+ // than zero, it means vmodule is enabled. It may be read safely
+ // using atomic.LoadInt32, but is only modified under mu.
+ filterLength int32
+ // traceLocation is the state of the -log_backtrace_at flag.
+ traceLocation traceLocation
+ // These flags are modified only under lock, although verbosity may be fetched
+ // safely using atomic.LoadInt32.
+ vmodule moduleSpec // The state of the -vmodule flag.
+ verbosity Level // V logging level, the value of the -v flag.
+}
+
+// buffer holds a byte Buffer for reuse. The zero value is ready for use.
+type buffer struct {
+ bytes.Buffer
+ tmp [64]byte // temporary byte array for creating headers.
+ next *buffer
+}
+
+var logging loggingT
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+ // Turn verbosity off so V will not fire while we are in transition.
+ logging.verbosity.set(0)
+ // Ditto for filter length.
+ atomic.StoreInt32(&logging.filterLength, 0)
+
+ // Set the new filters and wipe the pc->Level map if the filter has changed.
+ if setFilter {
+ logging.vmodule.filter = filter
+ logging.vmap = make(map[uintptr]Level)
+ }
+
+ // Things are consistent now, so enable filtering and verbosity.
+ // They are enabled in order opposite to that in V.
+ atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
+ logging.verbosity.set(verbosity)
+}
+
+// getBuffer returns a new, ready-to-use buffer.
+func (l *loggingT) getBuffer() *buffer {
+ l.freeListMu.Lock()
+ b := l.freeList
+ if b != nil {
+ l.freeList = b.next
+ }
+ l.freeListMu.Unlock()
+ if b == nil {
+ b = new(buffer)
+ } else {
+ b.next = nil
+ b.Reset()
+ }
+ return b
+}
+
+// putBuffer returns a buffer to the free list.
+func (l *loggingT) putBuffer(b *buffer) {
+ if b.Len() >= 256 {
+ // Let big buffers die a natural death.
+ return
+ }
+ l.freeListMu.Lock()
+ b.next = l.freeList
+ l.freeList = b
+ l.freeListMu.Unlock()
+}
+
+var timeNow = time.Now // Stubbed out for testing.
+
+/*
+header formats a log header as defined by the C++ implementation.
+It returns a buffer containing the formatted header and the user's file and line number.
+The depth specifies how many stack frames above lives the source line to be identified in the log message.
+
+Log lines have this form:
+ Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+where the fields are defined as follows:
+ L A single character, representing the log level (eg 'I' for INFO)
+ mm The month (zero padded; ie May is '05')
+ dd The day (zero padded)
+ hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
+ threadid The space-padded thread ID as returned by GetTID()
+ file The file name
+ line The line number
+ msg The user-supplied message
+*/
+func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
+ _, file, line, ok := runtime.Caller(3 + depth)
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ if slash >= 0 {
+ file = file[slash+1:]
+ }
+ }
+ return l.formatHeader(s, file, line), file, line
+}
+
+// formatHeader formats a log header using the provided file name and line number.
+func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
+ now := timeNow()
+ if line < 0 {
+ line = 0 // not a real line number, but acceptable to someDigits
+ }
+ if s > fatalLog {
+ s = infoLog // for safety.
+ }
+ buf := l.getBuffer()
+
+ // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+ // It's worth about 3X. Fprintf is hard.
+ _, month, day := now.Date()
+ hour, minute, second := now.Clock()
+ // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+ buf.tmp[0] = severityChar[s]
+ buf.twoDigits(1, int(month))
+ buf.twoDigits(3, day)
+ buf.tmp[5] = ' '
+ buf.twoDigits(6, hour)
+ buf.tmp[8] = ':'
+ buf.twoDigits(9, minute)
+ buf.tmp[11] = ':'
+ buf.twoDigits(12, second)
+ buf.tmp[14] = '.'
+ buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
+ buf.tmp[21] = ' '
+ buf.nDigits(7, 22, pid, ' ') // TODO: should be TID
+ buf.tmp[29] = ' '
+ buf.Write(buf.tmp[:30])
+ buf.WriteString(file)
+ buf.tmp[0] = ':'
+ n := buf.someDigits(1, line)
+ buf.tmp[n+1] = ']'
+ buf.tmp[n+2] = ' '
+ buf.Write(buf.tmp[:n+3])
+ return buf
+}
+
+// Some custom tiny helper functions to print the log header efficiently.
+
+const digits = "0123456789"
+
+// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
+func (buf *buffer) twoDigits(i, d int) {
+ buf.tmp[i+1] = digits[d%10]
+ d /= 10
+ buf.tmp[i] = digits[d%10]
+}
+
+// nDigits formats an n-digit integer at buf.tmp[i],
+// padding with pad on the left.
+// It assumes d >= 0.
+func (buf *buffer) nDigits(n, i, d int, pad byte) {
+ j := n - 1
+ for ; j >= 0 && d > 0; j-- {
+ buf.tmp[i+j] = digits[d%10]
+ d /= 10
+ }
+ for ; j >= 0; j-- {
+ buf.tmp[i+j] = pad
+ }
+}
+
+// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
+func (buf *buffer) someDigits(i, d int) int {
+ // Print into the top, then copy down. We know there's space for at least
+ // a 10-digit number.
+ j := len(buf.tmp)
+ for {
+ j--
+ buf.tmp[j] = digits[d%10]
+ d /= 10
+ if d == 0 {
+ break
+ }
+ }
+ return copy(buf.tmp[i:], buf.tmp[j:])
+}
+
+func (l *loggingT) println(s severity, args ...interface{}) {
+ buf, file, line := l.header(s, 0)
+ fmt.Fprintln(buf, args...)
+ l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) print(s severity, args ...interface{}) {
+ l.printDepth(s, 1, args...)
+}
+
+func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) {
+ buf, file, line := l.header(s, depth)
+ fmt.Fprint(buf, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) printf(s severity, format string, args ...interface{}) {
+ buf, file, line := l.header(s, 0)
+ fmt.Fprintf(buf, format, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, buf, file, line, false)
+}
+
+// printWithFileLine behaves like print but uses the provided file and line number. If
+// alsoToStderr is true, the log message always appears on standard error; it
+// will also appear in the log file unless --logtostderr is set.
+func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) {
+ buf := l.formatHeader(s, file, line)
+ fmt.Fprint(buf, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, buf, file, line, alsoToStderr)
+}
+
+// output writes the data to the log files and releases the buffer.
+func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
+ l.mu.Lock()
+ if l.traceLocation.isSet() {
+ if l.traceLocation.match(file, line) {
+ buf.Write(stacks(false))
+ }
+ }
+ data := buf.Bytes()
+ if l.toStderr {
+ os.Stderr.Write(data)
+ } else {
+ if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
+ os.Stderr.Write(data)
+ }
+ if l.file[s] == nil {
+ if err := l.createFiles(s); err != nil {
+ os.Stderr.Write(data) // Make sure the message appears somewhere.
+ l.exit(err)
+ }
+ }
+ switch s {
+ case fatalLog:
+ l.file[fatalLog].Write(data)
+ fallthrough
+ case errorLog:
+ l.file[errorLog].Write(data)
+ fallthrough
+ case warningLog:
+ l.file[warningLog].Write(data)
+ fallthrough
+ case infoLog:
+ l.file[infoLog].Write(data)
+ }
+ }
+ if s == fatalLog {
+ // If we got here via Exit rather than Fatal, print no stacks.
+ if atomic.LoadUint32(&fatalNoStacks) > 0 {
+ l.mu.Unlock()
+ timeoutFlush(10 * time.Second)
+ os.Exit(1)
+ }
+ // Dump all goroutine stacks before exiting.
+ // First, make sure we see the trace for the current goroutine on standard error.
+ // If -logtostderr has been specified, the loop below will do that anyway
+ // as the first stack in the full dump.
+ if !l.toStderr {
+ os.Stderr.Write(stacks(false))
+ }
+ // Write the stack trace for all goroutines to the files.
+ trace := stacks(true)
+ logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
+ for log := fatalLog; log >= infoLog; log-- {
+ if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
+ f.Write(trace)
+ }
+ }
+ l.mu.Unlock()
+ timeoutFlush(10 * time.Second)
+ os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
+ }
+ l.putBuffer(buf)
+ l.mu.Unlock()
+ if stats := severityStats[s]; stats != nil {
+ atomic.AddInt64(&stats.lines, 1)
+ atomic.AddInt64(&stats.bytes, int64(len(data)))
+ }
+}
+
+// timeoutFlush calls Flush and returns when it completes or after timeout
+// elapses, whichever happens first. This is needed because the hooks invoked
+// by Flush may deadlock when glog.Fatal is called from a hook that holds
+// a lock.
+func timeoutFlush(timeout time.Duration) {
+ done := make(chan bool, 1)
+ go func() {
+ Flush() // calls logging.lockAndFlushAll()
+ done <- true
+ }()
+ select {
+ case <-done:
+ case <-time.After(timeout):
+ fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout)
+ }
+}
+
+// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
+func stacks(all bool) []byte {
+ // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
+ n := 10000
+ if all {
+ n = 100000
+ }
+ var trace []byte
+ for i := 0; i < 5; i++ {
+ trace = make([]byte, n)
+ nbytes := runtime.Stack(trace, all)
+ if nbytes < len(trace) {
+ return trace[:nbytes]
+ }
+ n *= 2
+ }
+ return trace
+}
+
+// logExitFunc provides a simple mechanism to override the default behavior
+// of exiting on error. Used in testing and to guarantee we reach a required exit
+// for fatal logs. Instead, exit could be a function rather than a method but that
+// would make its use clumsier.
+var logExitFunc func(error)
+
+// exit is called if there is trouble creating or writing log files.
+// It flushes the logs and exits the program; there's no point in hanging around.
+// l.mu is held.
+func (l *loggingT) exit(err error) {
+ fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
+ // If logExitFunc is set, we do that instead of exiting.
+ if logExitFunc != nil {
+ logExitFunc(err)
+ return
+ }
+ l.flushAll()
+ os.Exit(2)
+}
+
+// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
+// file's Sync method and providing a wrapper for the Write method that provides log
+// file rotation. There are conflicting methods, so the file cannot be embedded.
+// l.mu is held for all its methods.
+type syncBuffer struct {
+ logger *loggingT
+ *bufio.Writer
+ file *os.File
+ sev severity
+ nbytes uint64 // The number of bytes written to this file
+}
+
+func (sb *syncBuffer) Sync() error {
+ return sb.file.Sync()
+}
+
+func (sb *syncBuffer) Write(p []byte) (n int, err error) {
+ if sb.nbytes+uint64(len(p)) >= MaxSize {
+ if err := sb.rotateFile(time.Now()); err != nil {
+ sb.logger.exit(err)
+ }
+ }
+ n, err = sb.Writer.Write(p)
+ sb.nbytes += uint64(n)
+ if err != nil {
+ sb.logger.exit(err)
+ }
+ return
+}
+
+// rotateFile closes the syncBuffer's file and starts a new one.
+func (sb *syncBuffer) rotateFile(now time.Time) error {
+ if sb.file != nil {
+ sb.Flush()
+ sb.file.Close()
+ }
+ var err error
+ sb.file, _, err = create(severityName[sb.sev], now)
+ sb.nbytes = 0
+ if err != nil {
+ return err
+ }
+
+ sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
+
+ // Write header.
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
+ fmt.Fprintf(&buf, "Running on machine: %s\n", host)
+ fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+ fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
+ n, err := sb.file.Write(buf.Bytes())
+ sb.nbytes += uint64(n)
+ return err
+}
+
+// bufferSize sizes the buffer associated with each log file. It's large
+// so that log records can accumulate without the logging thread blocking
+// on disk I/O. The flushDaemon will block instead.
+const bufferSize = 256 * 1024
+
+// createFiles creates all the log files for severity from sev down to infoLog.
+// l.mu is held.
+func (l *loggingT) createFiles(sev severity) error {
+ now := time.Now()
+ // Files are created in decreasing severity order, so as soon as we find one
+ // has already been created, we can stop.
+ for s := sev; s >= infoLog && l.file[s] == nil; s-- {
+ sb := &syncBuffer{
+ logger: l,
+ sev: s,
+ }
+ if err := sb.rotateFile(now); err != nil {
+ return err
+ }
+ l.file[s] = sb
+ }
+ return nil
+}
+
+const flushInterval = 30 * time.Second
+
+// flushDaemon periodically flushes the log file buffers.
+func (l *loggingT) flushDaemon() {
+ for _ = range time.NewTicker(flushInterval).C {
+ l.lockAndFlushAll()
+ }
+}
+
+// lockAndFlushAll is like flushAll but locks l.mu first.
+func (l *loggingT) lockAndFlushAll() {
+ l.mu.Lock()
+ l.flushAll()
+ l.mu.Unlock()
+}
+
+// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// l.mu is held.
+func (l *loggingT) flushAll() {
+ // Flush from fatal down, in case there's trouble flushing.
+ for s := fatalLog; s >= infoLog; s-- {
+ file := l.file[s]
+ if file != nil {
+ file.Flush() // ignore error
+ file.Sync() // ignore error
+ }
+ }
+}
+
+// CopyStandardLogTo arranges for messages written to the Go "log" package's
+// default logs to also appear in the Google logs for the named and lower
+// severities. Subsequent changes to the standard log's default output location
+// or format may break this behavior.
+//
+// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
+// recognized, CopyStandardLogTo panics.
+func CopyStandardLogTo(name string) {
+ sev, ok := severityByName(name)
+ if !ok {
+ panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
+ }
+ // Set a log format that captures the user's file and line:
+ // d.go:23: message
+ stdLog.SetFlags(stdLog.Lshortfile)
+ stdLog.SetOutput(logBridge(sev))
+}
+
+// logBridge provides the Write method that enables CopyStandardLogTo to connect
+// Go's standard logs to the logs provided by this package.
+type logBridge severity
+
+// Write parses the standard logging line and passes its components to the
+// logger for severity(lb).
+func (lb logBridge) Write(b []byte) (n int, err error) {
+ var (
+ file = "???"
+ line = 1
+ text string
+ )
+ // Split "d.go:23: message" into "d.go", "23", and "message".
+ if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
+ text = fmt.Sprintf("bad log format: %s", b)
+ } else {
+ file = string(parts[0])
+ text = string(parts[2][1:]) // skip leading space
+ line, err = strconv.Atoi(string(parts[1]))
+ if err != nil {
+ text = fmt.Sprintf("bad line number: %s", b)
+ line = 1
+ }
+ }
+ // printWithFileLine with alsoToStderr=true, so standard log messages
+ // always appear on standard error.
+ logging.printWithFileLine(severity(lb), file, line, true, text)
+ return len(b), nil
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// l.mu is held.
+func (l *loggingT) setV(pc uintptr) Level {
+ fn := runtime.FuncForPC(pc)
+ file, _ := fn.FileLine(pc)
+ // The file is something like /a/b/c/d.go. We want just the d.
+ if strings.HasSuffix(file, ".go") {
+ file = file[:len(file)-3]
+ }
+ if slash := strings.LastIndex(file, "/"); slash >= 0 {
+ file = file[slash+1:]
+ }
+ for _, filter := range l.vmodule.filter {
+ if filter.match(file) {
+ l.vmap[pc] = filter.level
+ return filter.level
+ }
+ }
+ l.vmap[pc] = 0
+ return 0
+}
+
+// Verbose is a boolean type that implements Infof (like Printf) etc.
+// See the documentation of V for more information.
+type Verbose bool
+
+// V reports whether verbosity at the call site is at least the requested level.
+// The returned value is a boolean of type Verbose, which implements Info, Infoln
+// and Infof. These methods will write to the Info log if called.
+// Thus, one may write either
+// if glog.V(2) { glog.Info("log this") }
+// or
+// glog.V(2).Info("log this")
+// The second form is shorter but the first is cheaper if logging is off because it does
+// not evaluate its arguments.
+//
+// Whether an individual call to V generates a log record depends on the setting of
+// the -v and --vmodule flags; both are off by default. If the level in the call to
+// V is at least the value of -v, or of -vmodule for the source file containing the
+// call, the V call will log.
+func V(level Level) Verbose {
+ // This function tries hard to be cheap unless there's work to do.
+ // The fast path is two atomic loads and compares.
+
+ // Here is a cheap but safe test to see if V logging is enabled globally.
+ if logging.verbosity.get() >= level {
+ return Verbose(true)
+ }
+
+ // It's off globally but vmodule may still be set.
+ // Here is another cheap but safe test to see if vmodule is enabled.
+ if atomic.LoadInt32(&logging.filterLength) > 0 {
+ // Now we need a proper lock to use the logging structure. The pcs field
+ // is shared so we must lock before accessing it. This is fairly expensive,
+ // but if V logging is enabled we're slow anyway.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ if runtime.Callers(2, logging.pcs[:]) == 0 {
+ return Verbose(false)
+ }
+ v, ok := logging.vmap[logging.pcs[0]]
+ if !ok {
+ v = logging.setV(logging.pcs[0])
+ }
+ return Verbose(v >= level)
+ }
+ return Verbose(false)
+}
+
+// Info is equivalent to the global Info function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Info(args ...interface{}) {
+ if v {
+ logging.print(infoLog, args...)
+ }
+}
+
+// Infoln is equivalent to the global Infoln function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infoln(args ...interface{}) {
+ if v {
+ logging.println(infoLog, args...)
+ }
+}
+
+// Infof is equivalent to the global Infof function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infof(format string, args ...interface{}) {
+ if v {
+ logging.printf(infoLog, format, args...)
+ }
+}
+
+// Info logs to the INFO log.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Info(args ...interface{}) {
+ logging.print(infoLog, args...)
+}
+
+// InfoDepth acts as Info but uses depth to determine which call frame to log.
+// InfoDepth(0, "msg") is the same as Info("msg").
+func InfoDepth(depth int, args ...interface{}) {
+ logging.printDepth(infoLog, depth, args...)
+}
+
+// Infoln logs to the INFO log.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Infoln(args ...interface{}) {
+ logging.println(infoLog, args...)
+}
+
+// Infof logs to the INFO log.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Infof(format string, args ...interface{}) {
+ logging.printf(infoLog, format, args...)
+}
+
+// Warning logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Warning(args ...interface{}) {
+ logging.print(warningLog, args...)
+}
+
+// WarningDepth acts as Warning but uses depth to determine which call frame to log.
+// WarningDepth(0, "msg") is the same as Warning("msg").
+func WarningDepth(depth int, args ...interface{}) {
+ logging.printDepth(warningLog, depth, args...)
+}
+
+// Warningln logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Warningln(args ...interface{}) {
+ logging.println(warningLog, args...)
+}
+
+// Warningf logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Warningf(format string, args ...interface{}) {
+ logging.printf(warningLog, format, args...)
+}
+
+// Error logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Error(args ...interface{}) {
+ logging.print(errorLog, args...)
+}
+
+// ErrorDepth acts as Error but uses depth to determine which call frame to log.
+// ErrorDepth(0, "msg") is the same as Error("msg").
+func ErrorDepth(depth int, args ...interface{}) {
+ logging.printDepth(errorLog, depth, args...)
+}
+
+// Errorln logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Errorln(args ...interface{}) {
+ logging.println(errorLog, args...)
+}
+
+// Errorf logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Errorf(format string, args ...interface{}) {
+ logging.printf(errorLog, format, args...)
+}
+
+// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Fatal(args ...interface{}) {
+ logging.print(fatalLog, args...)
+}
+
+// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
+// FatalDepth(0, "msg") is the same as Fatal("msg").
+func FatalDepth(depth int, args ...interface{}) {
+ logging.printDepth(fatalLog, depth, args...)
+}
+
+// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Fatalln(args ...interface{}) {
+ logging.println(fatalLog, args...)
+}
+
+// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Fatalf(format string, args ...interface{}) {
+ logging.printf(fatalLog, format, args...)
+}
+
+// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
+// It allows Exit and relatives to use the Fatal logs.
+var fatalNoStacks uint32
+
+// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Exit(args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.print(fatalLog, args...)
+}
+
+// ExitDepth acts as Exit but uses depth to determine which call frame to log.
+// ExitDepth(0, "msg") is the same as Exit("msg").
+func ExitDepth(depth int, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printDepth(fatalLog, depth, args...)
+}
+
+// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+func Exitln(args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.println(fatalLog, args...)
+}
+
+// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Exitf(format string, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printf(fatalLog, format, args...)
+}
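
The package documentation above notes that output is buffered and that flag.Parse must run before any logging. As a small, hedged usage sketch of the exported API (program name and messages are illustrative only):

    package main

    import (
    	"flag"

    	"github.com/golang/glog"
    )

    func main() {
    	flag.Parse()       // glog registers its flags in init(); parse them before logging
    	defer glog.Flush() // output is buffered; flush before exit

    	glog.Info("Prepare to repel boarders")
    	if glog.V(2) {
    		// Evaluated only when -v=2 (or a matching -vmodule entry) is set.
    		glog.Info("verbose details, shown only with -v=2 or -vmodule=main=2")
    	}
    	glog.V(2).Infoln("Processed", 42, "elements")
    	glog.Warningf("disk usage at %d%%", 91)
    }
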
diff --git a/src/kube2msb/vendor/github.com/golang/glog/glog_file.go b/src/kube2msb/vendor/github.com/golang/glog/glog_file.go
new file mode 100644
index 0000000..65075d2
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/glog/glog_file.go
@@ -0,0 +1,124 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// File I/O for logs.
+
+package glog
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "os"
+ "os/user"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+)
+
+// MaxSize is the maximum size of a log file in bytes.
+var MaxSize uint64 = 1024 * 1024 * 1800
+
+// logDirs lists the candidate directories for new log files.
+var logDirs []string
+
+// If non-empty, overrides the choice of directory in which to write logs.
+// See createLogDirs for the full list of possible destinations.
+var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
+
+func createLogDirs() {
+ if *logDir != "" {
+ logDirs = append(logDirs, *logDir)
+ }
+ logDirs = append(logDirs, os.TempDir())
+}
+
+var (
+ pid = os.Getpid()
+ program = filepath.Base(os.Args[0])
+ host = "unknownhost"
+ userName = "unknownuser"
+)
+
+func init() {
+ h, err := os.Hostname()
+ if err == nil {
+ host = shortHostname(h)
+ }
+
+ current, err := user.Current()
+ if err == nil {
+ userName = current.Username
+ }
+
+ // Sanitize userName since it may contain filepath separators on Windows.
+ userName = strings.Replace(userName, `\`, "_", -1)
+}
+
+// shortHostname returns its argument, truncating at the first period.
+// For instance, given "www.google.com" it returns "www".
+func shortHostname(hostname string) string {
+ if i := strings.Index(hostname, "."); i >= 0 {
+ return hostname[:i]
+ }
+ return hostname
+}
+
+// logName returns a new log file name containing tag, with start time t, and
+// the name for the symlink for tag.
+func logName(tag string, t time.Time) (name, link string) {
+ name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
+ program,
+ host,
+ userName,
+ tag,
+ t.Year(),
+ t.Month(),
+ t.Day(),
+ t.Hour(),
+ t.Minute(),
+ t.Second(),
+ pid)
+ return name, program + "." + tag
+}
+
+var onceLogDirs sync.Once
+
+// create creates a new log file and returns the file and its filename, which
+// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
+// successfully, create also attempts to update the symlink for that tag, ignoring
+// errors.
+func create(tag string, t time.Time) (f *os.File, filename string, err error) {
+ onceLogDirs.Do(createLogDirs)
+ if len(logDirs) == 0 {
+ return nil, "", errors.New("log: no log dirs")
+ }
+ name, link := logName(tag, t)
+ var lastErr error
+ for _, dir := range logDirs {
+ fname := filepath.Join(dir, name)
+ f, err := os.Create(fname)
+ if err == nil {
+ symlink := filepath.Join(dir, link)
+ os.Remove(symlink) // ignore err
+ os.Symlink(name, symlink) // ignore err
+ return f, fname, nil
+ }
+ lastErr = err
+ }
+ return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
+}
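
As a rough illustration of the file names create builds via logName, a standalone sketch with made-up program, host, user, and pid values (at runtime these are derived from the process and environment):

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2016, time.May, 10, 13, 4, 5, 0, time.UTC)
	// Mirrors the logName format: program.host.user.log.TAG.yyyymmdd-hhmmss.pid
	name := fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
		"kube2msb", "node01", "root", "INFO",
		t.Year(), t.Month(), t.Day(),
		t.Hour(), t.Minute(), t.Second(), 12345)
	fmt.Println(name) // kube2msb.node01.root.log.INFO.20160510-130405.12345
	// The symlink written next to it would simply be "kube2msb.INFO".
}
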
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/LICENSE b/src/kube2msb/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..1b1b192
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/Makefile b/src/kube2msb/vendor/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 0000000..f1f0656
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
+ make
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/clone.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..e98ddec
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,223 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := in.Addr().Interface().(extendableProto); ok {
+ emOut := out.Addr().Interface().(extendableProto)
+ mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
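
A hedged usage sketch for Clone and Merge. The generated message type (examplepb.Service) and its fields are hypothetical stand-ins for any protoc-generated struct; proto.Clone, proto.Merge, proto.String, and proto.Int32 come from this package.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	examplepb "example.com/hypothetical/examplepb" // any generated package
)

func main() {
	src := &examplepb.Service{Name: proto.String("msb"), Port: proto.Int32(80)}

	// Clone returns a deep copy; mutating the copy leaves src untouched.
	dup := proto.Clone(src).(*examplepb.Service)
	dup.Port = proto.Int32(8080)

	// Merge copies fields that are set in src into dst; repeated fields are
	// appended and nested messages are merged recursively.
	dst := &examplepb.Service{Name: proto.String("placeholder")}
	proto.Merge(dst, src)

	fmt.Println(*src.Port, *dup.Port, *dst.Name) // 80 8080 msb
}
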
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/decode.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..5810782
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,867 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero for both if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ // x, n already 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ // x, err already 0
+
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // TODO: check whether more call sites can use alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ ext := e.ExtensionMap()[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ e.ExtensionMap()[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // The tagcodes for the key and value properties are always a single byte
+ // because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() || !valelem.IsValid() {
+ // We did not decode the key or the value in the map entry.
+ // Either way, it's an invalid map entry.
+ return fmt.Errorf("proto: bad map data: missing key/val")
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
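
To make the varint handling above concrete, a small sketch of the wire format DecodeVarint parses: seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last. EncodeVarint (defined in the next file) is the inverse.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 = 0b1_0010_1100 -> 7-bit groups 0101100, 0000010 -> bytes 0xAC, 0x02
	buf := []byte{0xAC, 0x02}

	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2

	fmt.Printf("% x\n", proto.EncodeVarint(300)) // ac 02
}
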
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/encode.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..231b074
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1325 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ var state errorState
+ if err != nil && !state.shouldContinue(err, nil) {
+ return nil, err
+ }
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ if err != nil {
+ return err
+ }
+ p.buf = append(p.buf, data...)
+ return nil
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Encode++
+ }
+
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Size++
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but only reference kinds
+// (interface, map, pointer, slice) can actually be nil.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ v := *structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionMap(v); err != nil {
+ return err
+ }
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := *structPointer_ExtMap(base, p.field)
+ return sizeExtensionMap(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
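+ // Illustrative sketch (added; not part of the original source): a single entry
+ // of a map<string,int32> field numbered 4 (an assumed example), say {"a": 1},
+ // is emitted as one length-delimited MapFieldEntry:
+ //   0x22            tag: field 4, wire type 2 (bytes)
+ //   0x05            entry length
+ //   0x0a 0x01 0x61  key:   field 1, wire type 2, "a"
+ //   0x10 0x01       value: field 2, varint, 1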
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ // The only illegal map entry values are nil message pointers.
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ return errors.New("proto: map has nil element")
+ }
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns addressable scratch values for the map's key and
+// value types (keycopy, valcopy), plus structPointers to them (keybase, valbase)
+// suitable for passing to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) //
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ if prop.oneofSizer != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ n += prop.oneofSizer(m)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
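+// Worked example (added, illustrative): four bytes are reserved for the length
+// up front. If enc() writes a 300-byte body, sizeVarint(300) is 2, so
+// x = 2-4 = -2 and the body is shifted two bytes left before the length is
+// written into the freed space; only a body of 256 MiB or more needs a 5-byte
+// varint, in which case the body is shifted right to make room.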
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/equal.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..f5db1de
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,276 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extension sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal (a "bytes" field,
+ although represented by []byte, is not a repeated field).
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
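+// A minimal usage sketch (illustrative; pb.Test and its *string Label field are
+// the example generated message from the package documentation, not part of
+// this file):
+//
+//    a := &pb.Test{Label: proto.String("x")}
+//    b := &pb.Test{Label: proto.String("x")}
+//    proto.Equal(a, b)          // true: same type, corresponding fields equal
+//    proto.Equal(a, &pb.Test{}) // false: Label is set in one message, unset in the other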
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// em1 and em2 are extension maps.
+func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/extensions.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..054f4f1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,399 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base extendableProto, id int32, b []byte) {
+ base.ExtensionMap()[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ // Check the extended type.
+ if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+func sizeExtensionMap(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ _, ok := pb.ExtensionMap()[extension.Field]
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
+ // TODO: Check types, field numbers, etc.?
+ delete(pb.ExtensionMap(), extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return nil, err
+ }
+
+ emap := pb.ExtensionMap()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr, we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non-int32 reflect.Value directly,
+ // set it as an int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate a "field" to store the pointer/slice itself; the
+ // pointer/slice will be stored here. We pass
+ // the address of this field to props.dec.
+ // This passes a zero field and a *t and lets props.dec
+ // interpret it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := pb.(extendableProto)
+ if !ok {
+ err = errors.New("proto: not an extendable proto")
+ return
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
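+// A minimal usage sketch (illustrative; pb.MyMessage and pb.E_Something are
+// hypothetical generated names, not part of this package):
+//
+//    msg := &pb.MyMessage{}
+//    if err := proto.SetExtension(msg, pb.E_Something, proto.String("v")); err != nil {
+//        // handle err
+//    }
+//    if proto.HasExtension(msg, pb.E_Something) {
+//        v, err := proto.GetExtension(msg, pb.E_Something)
+//        _ = v.(*string) // v has the extension's Go type, here *string
+//        _ = err
+//    }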
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/lib.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..0de8f8d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,894 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Getters are only generated for message and oneof fields.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // write point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
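+// A minimal reuse sketch (illustrative; msgs and send are placeholders for the
+// caller's own data and transport, and Marshal is the Buffer method defined in
+// encode.go of this package):
+//
+//    var b proto.Buffer
+//    for _, m := range msgs {
+//        b.Reset()
+//        if err := b.Marshal(m); err != nil {
+//            // handle err
+//        }
+//        send(b.Bytes())
+//    }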
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
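+// For example (added, illustrative, using the FOO enum from the package comment
+// above): both the symbolic form `"X"` and the numeric form `17` yield
+// int32(17) from UnmarshalJSONEnum(FOO_value, data, "FOO").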
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to a slice of the fields,
+ // with its scalar fields set to their proto-declared non-zero default values.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
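+
+// Illustrative sketch (editorial addition, not upstream code): for a proto2
+// field declared as
+//
+//	optional int32 count = 1 [default = 7];
+//
+// the generated struct field carries a tag like
+// `protobuf:"varint,1,opt,name=count,def=7"`, so Parse records HasDefault
+// and Default == "7", and fieldDefault returns a scalarField with
+// kind == reflect.Int32 and value == int32(7).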
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
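+
+// Illustrative sketch (editorial addition, not upstream code): the map
+// encoder is expected to sort the reflected keys through this helper before
+// emitting entries, along the lines of:
+//
+//	keys := v.MapKeys()      // v is a reflect.Value of Kind Map
+//	sort.Sort(mapKeys(keys)) // numeric keys numerically, the rest textually
+//	for _, k := range keys {
+//		// encode the entry for key k and value v.MapIndex(k)
+//	}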
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/message_set.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..e25e01e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,280 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
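+
+// Illustrative sketch (editorial addition, not upstream code): skipVarint
+// drops exactly one varint-encoded value from the front of buf. The two-byte
+// varint 0x96 0x01 encodes 150 (0x16 + 1<<7), so:
+//
+//	rest := skipVarint([]byte{0x96, 0x01, 0xAA, 0xBB})
+//	// rest == []byte{0xAA, 0xBB}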
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
+ if err := encodeExtensionMap(m); err != nil {
+ return nil, err
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
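+
+// Illustrative sketch (editorial addition, not upstream code): for type_id 5
+// and a 3-byte payload p0 p1 p2, the rebuilt extension encoding above is an
+// ordinary length-delimited field: tag varint 5<<3|WireBytes == 0x2a, length
+// varint 0x03, then the payload, i.e. []byte{0x2a, 0x03, p0, p1, p2}.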
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ if i > 0 {
+ b.WriteByte(',')
+ }
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..749919d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,479 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(x))
+ case reflect.Uint64:
+ elem.SetUint(x)
+ case reflect.Float64:
+ elem.SetFloat(math.Float64frombits(x))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return uint64(elem.Uint())
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..e9be0fe
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,266 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
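+
+// Illustrative sketch (editorial addition, not upstream code): with this
+// representation, reading or writing a field is plain pointer arithmetic.
+// For a hypothetical message struct S { Name *string }:
+//
+//	var s S
+//	sf := reflect.TypeOf(s).Field(0)
+//	p := toStructPointer(reflect.ValueOf(&s))
+//	*structPointer_String(p, toField(&sf)) = String("hi") // sets s.Name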
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/properties.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..d4531c0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,842 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
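+
+// Illustrative sketch (editorial addition, not upstream code): tags below
+// tagMapFastLimit land in the fastTags slice and larger tags fall back to
+// the slowTags map, e.g.:
+//
+//	var tm tagMap
+//	tm.put(3, 0)        // stored in fastTags[3]
+//	tm.put(100000, 1)   // stored in slowTags[100000]
+//	fi, ok := tm.get(3) // fi == 0, ok == true
+//	_, ok = tm.get(99)  // ok == false (tag never stored)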
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ oneofSizer oneofSizer
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s = ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ if p.OrigName != p.Name {
+ s += ",name=" + p.OrigName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ }
+ }
+}
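+
+// Illustrative sketch (editorial addition, not upstream code): parsing the
+// example tag quoted above yields
+//
+//	var p Properties
+//	p.Parse("bytes,49,opt,name=foo,def=hello!")
+//	// p.Wire == "bytes", p.Tag == 49, p.Optional == true,
+//	// p.OrigName == "foo", p.HasDefault == true, p.Default == "hello!"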
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_byte
+ p.dec = (*Buffer).dec_slice_byte
+ p.size = size_slice_byte
+ // This is a []byte, which is either a bytes field,
+ // or the value of a map field. In the latter case,
+ // we always encode an empty []byte, so we should not
+ // use the proto3 enc/size funcs.
+ // f == nil iff this is the key/value of a map field.
+ if p.proto3 && f != nil {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isMarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isMarshaler")
+ }
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isUnmarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isUnmarshaler")
+ }
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
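+
+// Illustrative sketch (editorial addition, not upstream code): the encode and
+// decode paths look up properties from a message's concrete struct type, e.g.:
+//
+//	t := reflect.TypeOf(pb).Elem() // pb is a pointer to a generated struct
+//	sprop := GetProperties(t)      // cached after the first call for t
+//	for _, prop := range sprop.Prop {
+//		_ = prop.Tag // wire tag number of the corresponding field
+//	}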
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_extensions" { // special case
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ }
+ if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") != "" // special case
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
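+
+// Illustrative sketch, not part of the upstream file: generated .pb.go code
+// normally performs these registrations from an init func, and callers can then
+// resolve enums and message types by their fully-qualified proto names. The
+// names used below ("example.Color", exampleColorNames/Values, *ExampleMsg) are
+// hypothetical.
+//
+//	func init() {
+//		proto.RegisterEnum("example.Color", exampleColorNames, exampleColorValues)
+//		proto.RegisterType((*ExampleMsg)(nil), "example.ExampleMsg")
+//	}
+//
+//	// Text-format parsing can then map enum value names to numbers:
+//	if m := proto.EnumValueMap("example.Color"); m != nil {
+//		_ = m["RED"]
+//	}
+//	// and a registered message type can be recovered reflectively:
+//	t := proto.MessageType("example.ExampleMsg") // reflect.Type of *ExampleMsg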
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/text.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..2336b14
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,751 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Printf("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func writeStruct(w *textWriter, sv reflect.Value) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("<nil>\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if pv.Type().Implements(extendableProtoType) {
+ if err := writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Interface().([]byte))); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
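+
+// Illustrative note, not part of the upstream file: escaping is done per byte
+// with octal escapes, so a UTF-8 string keeps its newline escape but has the
+// two bytes of 'é' written in octal, e.g.
+//
+//	_ = writeString(w, "héllo\n") // writes "h\303\251llo\n"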
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep := pv.Interface().(extendableProto)
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m := ep.ExtensionMap()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+func marshalText(w io.Writer, pb Message, compact bool) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte("<nil>"))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: compact,
+ }
+
+ if tm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error {
+ return marshalText(w, pb, false)
+}
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, false)
+ return buf.String()
+}
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, true)
+ return buf.String()
+}
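+
+// Illustrative usage sketch, not part of the upstream file; msg is assumed to be
+// a pointer to some generated message type satisfying proto.Message.
+//
+//	var buf bytes.Buffer
+//	if err := proto.MarshalText(&buf, msg); err != nil {
+//		// the only errors come from the underlying writer
+//	}
+//	multi := buf.String()                   // indented, one field per line
+//	single := proto.CompactTextString(msg)  // same content on a single line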
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/text_parser.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..4513232
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,806 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]".
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == tok.value {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", tok.value)
+ }
+ // Check the extension terminator.
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != "]" {
+ return p.errorf("unrecognized extension terminator %q", tok.value)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(extendableProto)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ sv.Field(oop.Field).Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // Technically the "key" and "value" could come in any order,
+ // but in practice they won't.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ if err := p.consumeToken("key"); err != nil {
+ return err
+ }
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken("value"); err != nil {
+ return err
+ }
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken(terminator); err != nil {
+ return err
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ } else if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // Either "true", "false", 1 or 0.
+ switch tok.value {
+ case "true", "1":
+ fv.SetBool(true)
+ return nil
+ case "false", "0":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
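+
+// Illustrative round trip, not part of the upstream file; msg is assumed to be a
+// pointer to a generated message type, and pb.Example is hypothetical.
+//
+//	text := proto.MarshalTextString(msg)
+//	fresh := &pb.Example{}
+//	if err := proto.UnmarshalText(text, fresh); err != nil {
+//		// a *RequiredNotSetError means parsing succeeded apart from unset
+//		// required fields; any other error is a genuine parse failure
+//	}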
diff --git a/src/kube2msb/vendor/github.com/google/cadvisor/LICENSE b/src/kube2msb/vendor/github.com/google/cadvisor/LICENSE
new file mode 100644
index 0000000..97cec18
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/cadvisor/LICENSE
@@ -0,0 +1,190 @@
+ Copyright 2014 The cAdvisor Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/container.go b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/container.go
new file mode 100644
index 0000000..6e7e658
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/container.go
@@ -0,0 +1,583 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "reflect"
+ "time"
+)
+
+type CpuSpec struct {
+ Limit uint64 `json:"limit"`
+ MaxLimit uint64 `json:"max_limit"`
+ Mask string `json:"mask,omitempty"`
+ Quota uint64 `json:"quota,omitempty"`
+ Period uint64 `json:"period,omitempty"`
+}
+
+type MemorySpec struct {
+ // The amount of memory requested. Default is unlimited (-1).
+ // Units: bytes.
+ Limit uint64 `json:"limit,omitempty"`
+
+ // The amount of guaranteed memory. Default is 0.
+ // Units: bytes.
+ Reservation uint64 `json:"reservation,omitempty"`
+
+ // The amount of swap space requested. Default is unlimited (-1).
+ // Units: bytes.
+ SwapLimit uint64 `json:"swap_limit,omitempty"`
+}
+
+type ContainerSpec struct {
+ // Time at which the container was created.
+ CreationTime time.Time `json:"creation_time,omitempty"`
+
+ // Metadata labels associated with this container.
+ Labels map[string]string `json:"labels,omitempty"`
+ // Metadata envs associated with this container. Only whitelisted envs are added.
+ Envs map[string]string `json:"envs,omitempty"`
+
+ HasCpu bool `json:"has_cpu"`
+ Cpu CpuSpec `json:"cpu,omitempty"`
+
+ HasMemory bool `json:"has_memory"`
+ Memory MemorySpec `json:"memory,omitempty"`
+
+ HasNetwork bool `json:"has_network"`
+
+ HasFilesystem bool `json:"has_filesystem"`
+
+ // HasDiskIo when true, indicates that DiskIo stats will be available.
+ HasDiskIo bool `json:"has_diskio"`
+
+ HasCustomMetrics bool `json:"has_custom_metrics"`
+ CustomMetrics []MetricSpec `json:"custom_metrics,omitempty"`
+
+ // Image name used for this container.
+ Image string `json:"image,omitempty"`
+}
+
+// ContainerReference contains enough information to uniquely identify a container.
+type ContainerReference struct {
+ // The container id
+ Id string `json:"id,omitempty"`
+
+ // The absolute name of the container. This is unique on the machine.
+ Name string `json:"name"`
+
+ // Other names by which the container is known within a certain namespace.
+ // This is unique within that namespace.
+ Aliases []string `json:"aliases,omitempty"`
+
+ // Namespace under which the aliases of a container are unique.
+ // An example of a namespace is "docker" for Docker containers.
+ Namespace string `json:"namespace,omitempty"`
+
+ Labels map[string]string `json:"labels,omitempty"`
+}
+
+// Sorts by container name.
+type ContainerReferenceSlice []ContainerReference
+
+func (self ContainerReferenceSlice) Len() int { return len(self) }
+func (self ContainerReferenceSlice) Swap(i, j int) { self[i], self[j] = self[j], self[i] }
+func (self ContainerReferenceSlice) Less(i, j int) bool { return self[i].Name < self[j].Name }
+
+// ContainerInfoRequest is used when users request container info via the REST API.
+// It specifies how much data users want to get about a container.
+type ContainerInfoRequest struct {
+ // Max number of stats to return. Specify -1 for all stats currently available.
+ // Default: 60
+ NumStats int `json:"num_stats,omitempty"`
+
+ // Start time for which to query information.
+ // If omitted, the beginning of time is assumed.
+ Start time.Time `json:"start,omitempty"`
+
+ // End time for which to query information.
+ // If omitted, current time is assumed.
+ End time.Time `json:"end,omitempty"`
+}
+
+// Returns a ContainerInfoRequest with all default values specified.
+func DefaultContainerInfoRequest() ContainerInfoRequest {
+ return ContainerInfoRequest{
+ NumStats: 60,
+ }
+}
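+
+// Illustrative sketch, not part of the upstream file: callers typically start
+// from the defaults and override only what they need (the v1 package qualifier
+// is assumed).
+//
+//	req := v1.DefaultContainerInfoRequest()  // NumStats: 60
+//	req.NumStats = -1                        // ask for all stats currently available
+//	req.Start = time.Now().Add(-10 * time.Minute)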
+
+func (self *ContainerInfoRequest) Equals(other ContainerInfoRequest) bool {
+ return self.NumStats == other.NumStats &&
+ self.Start.Equal(other.Start) &&
+ self.End.Equal(other.End)
+}
+
+type ContainerInfo struct {
+ ContainerReference
+
+ // The direct subcontainers of the current container.
+ Subcontainers []ContainerReference `json:"subcontainers,omitempty"`
+
+ // The isolation used in the container.
+ Spec ContainerSpec `json:"spec,omitempty"`
+
+ // Historical statistics gathered from the container.
+ Stats []*ContainerStats `json:"stats,omitempty"`
+}
+
+// TODO(vmarmol): Refactor to not need this equality comparison.
+// ContainerInfo may be (un)marshaled by json or another en/decoder. In that
+// case, the Timestamp field in each stats/sample may not be precisely
+// en/decoded. This leads to small but acceptable differences between a
+// ContainerInfo and its encode-then-decode version. Eq() is used to compare
+// two ContainerInfos while accepting small differences (<10ms) in Time fields.
+func (self *ContainerInfo) Eq(b *ContainerInfo) bool {
+
+ // If both self and b are nil, then Eq() returns true
+ if self == nil {
+ return b == nil
+ }
+ if b == nil {
+ return self == nil
+ }
+
+ // For fields other than time.Time, we will compare them precisely.
+ // This would require that any slice should have same order.
+ if !reflect.DeepEqual(self.ContainerReference, b.ContainerReference) {
+ return false
+ }
+ if !reflect.DeepEqual(self.Subcontainers, b.Subcontainers) {
+ return false
+ }
+ if !self.Spec.Eq(&b.Spec) {
+ return false
+ }
+
+ for i, expectedStats := range b.Stats {
+ selfStats := self.Stats[i]
+ if !expectedStats.Eq(selfStats) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (self *ContainerSpec) Eq(b *ContainerSpec) bool {
+ // Creation within 1s of each other.
+ diff := self.CreationTime.Sub(b.CreationTime)
+ if (diff > time.Second) || (diff < -time.Second) {
+ return false
+ }
+
+ if self.HasCpu != b.HasCpu {
+ return false
+ }
+ if !reflect.DeepEqual(self.Cpu, b.Cpu) {
+ return false
+ }
+ if self.HasMemory != b.HasMemory {
+ return false
+ }
+ if !reflect.DeepEqual(self.Memory, b.Memory) {
+ return false
+ }
+ if self.HasNetwork != b.HasNetwork {
+ return false
+ }
+ if self.HasFilesystem != b.HasFilesystem {
+ return false
+ }
+ if self.HasDiskIo != b.HasDiskIo {
+ return false
+ }
+ if self.HasCustomMetrics != b.HasCustomMetrics {
+ return false
+ }
+ return true
+}
+
+func (self *ContainerInfo) StatsAfter(ref time.Time) []*ContainerStats {
+ n := len(self.Stats) + 1
+ for i, s := range self.Stats {
+ if s.Timestamp.After(ref) {
+ n = i
+ break
+ }
+ }
+ if n > len(self.Stats) {
+ return nil
+ }
+ return self.Stats[n:]
+}
+
+func (self *ContainerInfo) StatsStartTime() time.Time {
+ var ret time.Time
+ for _, s := range self.Stats {
+ if s.Timestamp.Before(ret) || ret.IsZero() {
+ ret = s.Timestamp
+ }
+ }
+ return ret
+}
+
+func (self *ContainerInfo) StatsEndTime() time.Time {
+ var ret time.Time
+ for i := len(self.Stats) - 1; i >= 0; i-- {
+ s := self.Stats[i]
+ if s.Timestamp.After(ret) {
+ ret = s.Timestamp
+ }
+ }
+ return ret
+}
+
+// This mirrors kernel internal structure.
+type LoadStats struct {
+ // Number of sleeping tasks.
+ NrSleeping uint64 `json:"nr_sleeping"`
+
+ // Number of running tasks.
+ NrRunning uint64 `json:"nr_running"`
+
+ // Number of tasks in stopped state
+ NrStopped uint64 `json:"nr_stopped"`
+
+ // Number of tasks in uninterruptible state
+ NrUninterruptible uint64 `json:"nr_uninterruptible"`
+
+ // Number of tasks waiting on IO
+ NrIoWait uint64 `json:"nr_io_wait"`
+}
+
+// CPU usage time statistics.
+type CpuUsage struct {
+ // Total CPU usage.
+ // Units: nanoseconds
+ Total uint64 `json:"total"`
+
+ // Per CPU/core usage of the container.
+ // Unit: nanoseconds.
+ PerCpu []uint64 `json:"per_cpu_usage,omitempty"`
+
+ // Time spent in user space.
+ // Unit: nanoseconds
+ User uint64 `json:"user"`
+
+ // Time spent in kernel space.
+ // Unit: nanoseconds
+ System uint64 `json:"system"`
+}
+
+// All CPU usage metrics are cumulative from the creation of the container
+type CpuStats struct {
+ Usage CpuUsage `json:"usage"`
+ // Smoothed average of the number of runnable threads x 1000.
+ // We multiply by a thousand to avoid using floats while preserving precision.
+ // Load is smoothed over the last 10 seconds. Instantaneous value can be read
+ // from LoadStats.NrRunning.
+ LoadAverage int32 `json:"load_average"`
+}
+
+type PerDiskStats struct {
+ Major uint64 `json:"major"`
+ Minor uint64 `json:"minor"`
+ Stats map[string]uint64 `json:"stats"`
+}
+
+type DiskIoStats struct {
+ IoServiceBytes []PerDiskStats `json:"io_service_bytes,omitempty"`
+ IoServiced []PerDiskStats `json:"io_serviced,omitempty"`
+ IoQueued []PerDiskStats `json:"io_queued,omitempty"`
+ Sectors []PerDiskStats `json:"sectors,omitempty"`
+ IoServiceTime []PerDiskStats `json:"io_service_time,omitempty"`
+ IoWaitTime []PerDiskStats `json:"io_wait_time,omitempty"`
+ IoMerged []PerDiskStats `json:"io_merged,omitempty"`
+ IoTime []PerDiskStats `json:"io_time,omitempty"`
+}
+
+type MemoryStats struct {
+ // Current memory usage, this includes all memory regardless of when it was
+ // accessed.
+ // Units: Bytes.
+ Usage uint64 `json:"usage"`
+
+ // Number of bytes of page cache memory.
+ // Units: Bytes.
+ Cache uint64 `json:"cache"`
+
+ // The amount of anonymous and swap cache memory (includes transparent
+ // hugepages).
+ // Units: Bytes.
+ RSS uint64 `json:"rss"`
+
+ // The amount of working set memory, this includes recently accessed memory,
+ // dirty memory, and kernel memory. Working set is <= "usage".
+ // Units: Bytes.
+ WorkingSet uint64 `json:"working_set"`
+
+ Failcnt uint64 `json:"failcnt"`
+
+ ContainerData MemoryStatsMemoryData `json:"container_data,omitempty"`
+ HierarchicalData MemoryStatsMemoryData `json:"hierarchical_data,omitempty"`
+}
+
+type MemoryStatsMemoryData struct {
+ Pgfault uint64 `json:"pgfault"`
+ Pgmajfault uint64 `json:"pgmajfault"`
+}
+
+type InterfaceStats struct {
+ // The name of the interface.
+ Name string `json:"name"`
+ // Cumulative count of bytes received.
+ RxBytes uint64 `json:"rx_bytes"`
+ // Cumulative count of packets received.
+ RxPackets uint64 `json:"rx_packets"`
+ // Cumulative count of receive errors encountered.
+ RxErrors uint64 `json:"rx_errors"`
+ // Cumulative count of packets dropped while receiving.
+ RxDropped uint64 `json:"rx_dropped"`
+ // Cumulative count of bytes transmitted.
+ TxBytes uint64 `json:"tx_bytes"`
+ // Cumulative count of packets transmitted.
+ TxPackets uint64 `json:"tx_packets"`
+ // Cumulative count of transmit errors encountered.
+ TxErrors uint64 `json:"tx_errors"`
+ // Cumulative count of packets dropped while transmitting.
+ TxDropped uint64 `json:"tx_dropped"`
+}
+
+type NetworkStats struct {
+ InterfaceStats `json:",inline"`
+ Interfaces []InterfaceStats `json:"interfaces,omitempty"`
+ // TCP connection stats (Established, Listen...)
+ Tcp TcpStat `json:"tcp"`
+ // TCP6 connection stats (Established, Listen...)
+ Tcp6 TcpStat `json:"tcp6"`
+}
+
+type TcpStat struct {
+ //Count of TCP connections in state "Established"
+ Established uint64
+ //Count of TCP connections in state "Syn_Sent"
+ SynSent uint64
+ //Count of TCP connections in state "Syn_Recv"
+ SynRecv uint64
+ //Count of TCP connections in state "Fin_Wait1"
+ FinWait1 uint64
+ //Count of TCP connections in state "Fin_Wait2"
+ FinWait2 uint64
+ //Count of TCP connections in state "Time_Wait"
+ TimeWait uint64
+ //Count of TCP connections in state "Close"
+ Close uint64
+ //Count of TCP connections in state "Close_Wait"
+ CloseWait uint64
+ //Count of TCP connections in state "Last_Ack"
+ LastAck uint64
+ //Count of TCP connections in state "Listen"
+ Listen uint64
+ //Count of TCP connections in state "Closing"
+ Closing uint64
+}
+
+type FsStats struct {
+ // The block device name associated with the filesystem.
+ Device string `json:"device,omitempty"`
+
+ // Type of the filesystem.
+ Type string `json:"type"`
+
+ // Number of bytes that can be consumed by the container on this filesystem.
+ Limit uint64 `json:"capacity"`
+
+ // Number of bytes that is consumed by the container on this filesystem.
+ Usage uint64 `json:"usage"`
+
+ // Base usage that is consumed by the container's writable layer.
+ // This field is currently only applicable to Docker containers.
+ BaseUsage uint64 `json:"base_usage"`
+
+ // Number of bytes available for non-root user.
+ Available uint64 `json:"available"`
+
+ // Number of available Inodes
+ InodesFree uint64 `json:"inodes_free"`
+
+ // Number of reads completed
+ // This is the total number of reads completed successfully.
+ ReadsCompleted uint64 `json:"reads_completed"`
+
+ // Number of reads merged
+ // Reads and writes which are adjacent to each other may be merged for
+ // efficiency. Thus two 4K reads may become one 8K read before it is
+ // ultimately handed to the disk, and so it will be counted (and queued)
+ // as only one I/O. This field lets you know how often this was done.
+ ReadsMerged uint64 `json:"reads_merged"`
+
+ // Number of sectors read
+ // This is the total number of sectors read successfully.
+ SectorsRead uint64 `json:"sectors_read"`
+
+ // Number of milliseconds spent reading
+ // This is the total number of milliseconds spent by all reads (as
+ // measured from __make_request() to end_that_request_last()).
+ ReadTime uint64 `json:"read_time"`
+
+ // Number of writes completed
+ // This is the total number of writes completed successfully.
+ WritesCompleted uint64 `json:"writes_completed"`
+
+ // Number of writes merged
+ // See the description of reads merged.
+ WritesMerged uint64 `json:"writes_merged"`
+
+ // Number of sectors written
+ // This is the total number of sectors written successfully.
+ SectorsWritten uint64 `json:"sectors_written"`
+
+ // Number of milliseconds spent writing
+ // This is the total number of milliseconds spent by all writes (as
+ // measured from __make_request() to end_that_request_last()).
+ WriteTime uint64 `json:"write_time"`
+
+ // Number of I/Os currently in progress
+ // The only field that should go to zero. Incremented as requests are
+ // given to appropriate struct request_queue and decremented as they finish.
+ IoInProgress uint64 `json:"io_in_progress"`
+
+ // Number of milliseconds spent doing I/Os
+ // This field increases so long as field 9 is nonzero.
+ IoTime uint64 `json:"io_time"`
+
+ // weighted number of milliseconds spent doing I/Os
+ // This field is incremented at each I/O start, I/O completion, I/O
+ // merge, or read of these stats by the number of I/Os in progress
+ // (field 9) times the number of milliseconds spent doing I/O since the
+ // last update of this field. This can provide an easy measure of both
+ // I/O completion time and the backlog that may be accumulating.
+ WeightedIoTime uint64 `json:"weighted_io_time"`
+}
+
+type ContainerStats struct {
+ // The time of this stat point.
+ Timestamp time.Time `json:"timestamp"`
+ Cpu CpuStats `json:"cpu,omitempty"`
+ DiskIo DiskIoStats `json:"diskio,omitempty"`
+ Memory MemoryStats `json:"memory,omitempty"`
+ Network NetworkStats `json:"network,omitempty"`
+
+ // Filesystem statistics
+ Filesystem []FsStats `json:"filesystem,omitempty"`
+
+ // Task load stats
+ TaskStats LoadStats `json:"task_stats,omitempty"`
+
+ //Custom metrics from all collectors
+ CustomMetrics map[string][]MetricVal `json:"custom_metrics,omitempty"`
+}
+
+func timeEq(t1, t2 time.Time, tolerance time.Duration) bool {
+ // t1 should not be later than t2
+ if t1.After(t2) {
+ t1, t2 = t2, t1
+ }
+ diff := t2.Sub(t1)
+ if diff <= tolerance {
+ return true
+ }
+ return false
+}
+
+const (
+ // 10ms, i.e. 0.01s
+ timePrecision time.Duration = 10 * time.Millisecond
+)
+
+// This function is useful because we do not require precise time
+// representation.
+func (a *ContainerStats) Eq(b *ContainerStats) bool {
+ if !timeEq(a.Timestamp, b.Timestamp, timePrecision) {
+ return false
+ }
+ return a.StatsEq(b)
+}
+
+// Checks equality of the stats values.
+func (a *ContainerStats) StatsEq(b *ContainerStats) bool {
+ // TODO(vmarmol): Consider using this through reflection.
+ if !reflect.DeepEqual(a.Cpu, b.Cpu) {
+ return false
+ }
+ if !reflect.DeepEqual(a.Memory, b.Memory) {
+ return false
+ }
+ if !reflect.DeepEqual(a.DiskIo, b.DiskIo) {
+ return false
+ }
+ if !reflect.DeepEqual(a.Network, b.Network) {
+ return false
+ }
+ if !reflect.DeepEqual(a.Filesystem, b.Filesystem) {
+ return false
+ }
+ return true
+}
+
+// Event contains information general to events such as the time at which they
+// occurred, their specific type, and the actual event. Event types are
+// differentiated by the EventType field of Event.
+type Event struct {
+ // the absolute container name for which the event occurred
+ ContainerName string `json:"container_name"`
+
+ // the time at which the event occurred
+ Timestamp time.Time `json:"timestamp"`
+
+ // the type of event. EventType is an enumerated type
+ EventType EventType `json:"event_type"`
+
+ // the original event object and all of its extraneous data, e.g. an
+ // OomInstance
+ EventData EventData `json:"event_data,omitempty"`
+}
+
+// EventType is an enumerated type which lists the categories under which
+// events may fall. The Event field EventType is populated by this enum.
+type EventType string
+
+const (
+ EventOom EventType = "oom"
+ EventOomKill = "oomKill"
+ EventContainerCreation = "containerCreation"
+ EventContainerDeletion = "containerDeletion"
+)
+
+// Extra information about an event. Only one type will be set.
+type EventData struct {
+ // Information about an OOM kill event.
+ OomKill *OomKillEventData `json:"oom,omitempty"`
+}
+
+// Information related to an OOM kill instance
+type OomKillEventData struct {
+ // process id of the killed process
+ Pid int `json:"pid"`
+
+ // The name of the killed process
+ ProcessName string `json:"process_name"`
+}
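A minimal sketch (not part of the upstream sources) of how the v1 types above fit together, assuming the vendored package is importable under its upstream path github.com/google/cadvisor/info/v1:

```
package main

import (
	"fmt"
	"time"

	info "github.com/google/cadvisor/info/v1"
)

func main() {
	now := time.Now()
	ci := &info.ContainerInfo{
		ContainerReference: info.ContainerReference{Name: "/docker/abc123"},
		Stats: []*info.ContainerStats{
			{Timestamp: now.Add(-2 * time.Minute)},
			{Timestamp: now.Add(-30 * time.Second)},
		},
	}

	// DefaultContainerInfoRequest fills in the documented default of 60 stats.
	req := info.DefaultContainerInfoRequest()
	fmt.Println("default NumStats:", req.NumStats) // 60

	// StatsAfter returns only the stats newer than the reference time.
	recent := ci.StatsAfter(now.Add(-time.Minute))
	fmt.Println("recent stats:", len(recent)) // 1
}
```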
diff --git a/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/docker.go b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/docker.go
new file mode 100644
index 0000000..2703c53
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/docker.go
@@ -0,0 +1,37 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Types used for docker containers.
+package v1
+
+type DockerStatus struct {
+ Version string `json:"version"`
+ KernelVersion string `json:"kernel_version"`
+ OS string `json:"os"`
+ Hostname string `json:"hostname"`
+ RootDir string `json:"root_dir"`
+ Driver string `json:"driver"`
+ DriverStatus map[string]string `json:"driver_status"`
+ ExecDriver string `json:"exec_driver"`
+ NumImages int `json:"num_images"`
+ NumContainers int `json:"num_containers"`
+}
+
+type DockerImage struct {
+ ID string `json:"id"`
+ RepoTags []string `json:"repo_tags"` // repository name and tags.
+ Created int64 `json:"created"` // unix time since creation.
+ VirtualSize int64 `json:"virtual_size"`
+ Size int64 `json:"size"`
+}
diff --git a/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/machine.go b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/machine.go
new file mode 100644
index 0000000..74a5df4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/machine.go
@@ -0,0 +1,205 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+type FsInfo struct {
+ // Block device associated with the filesystem.
+ Device string `json:"device"`
+
+ // Total number of bytes available on the filesystem.
+ Capacity uint64 `json:"capacity"`
+
+ // Type of device.
+ Type string `json:"type"`
+
+ // Total number of inodes available on the filesystem.
+ Inodes uint64 `json:"inodes"`
+}
+
+type Node struct {
+ Id int `json:"node_id"`
+ // Per-node memory
+ Memory uint64 `json:"memory"`
+ Cores []Core `json:"cores"`
+ Caches []Cache `json:"caches"`
+}
+
+type Core struct {
+ Id int `json:"core_id"`
+ Threads []int `json:"thread_ids"`
+ Caches []Cache `json:"caches"`
+}
+
+type Cache struct {
+ // Size of memory cache in bytes.
+ Size uint64 `json:"size"`
+ // Type of memory cache: data, instruction, or unified.
+ Type string `json:"type"`
+ // Level (distance from cpus) in a multi-level cache hierarchy.
+ Level int `json:"level"`
+}
+
+func (self *Node) FindCore(id int) (bool, int) {
+ for i, n := range self.Cores {
+ if n.Id == id {
+ return true, i
+ }
+ }
+ return false, -1
+}
+
+func (self *Node) AddThread(thread int, core int) {
+ var coreIdx int
+ if core == -1 {
+ // Assume one hyperthread per core when topology data is missing.
+ core = thread
+ }
+ ok, coreIdx := self.FindCore(core)
+
+ if !ok {
+ // New core
+ core := Core{Id: core}
+ self.Cores = append(self.Cores, core)
+ coreIdx = len(self.Cores) - 1
+ }
+ self.Cores[coreIdx].Threads = append(self.Cores[coreIdx].Threads, thread)
+}
+
+func (self *Node) AddNodeCache(c Cache) {
+ self.Caches = append(self.Caches, c)
+}
+
+func (self *Node) AddPerCoreCache(c Cache) {
+ for idx := range self.Cores {
+ self.Cores[idx].Caches = append(self.Cores[idx].Caches, c)
+ }
+}
+
+type DiskInfo struct {
+ // device name
+ Name string `json:"name"`
+
+ // Major number
+ Major uint64 `json:"major"`
+
+ // Minor number
+ Minor uint64 `json:"minor"`
+
+ // Size in bytes
+ Size uint64 `json:"size"`
+
+ // I/O Scheduler - one of "none", "noop", "cfq", "deadline"
+ Scheduler string `json:"scheduler"`
+}
+
+type NetInfo struct {
+ // Device name
+ Name string `json:"name"`
+
+ // Mac Address
+ MacAddress string `json:"mac_address"`
+
+ // Speed in MBits/s
+ Speed int64 `json:"speed"`
+
+ // Maximum Transmission Unit
+ Mtu int64 `json:"mtu"`
+}
+
+type CloudProvider string
+
+const (
+ GCE CloudProvider = "GCE"
+ AWS = "AWS"
+ Azure = "Azure"
+ Baremetal = "Baremetal"
+ UnknownProvider = "Unknown"
+)
+
+type InstanceType string
+
+const (
+ NoInstance InstanceType = "None"
+ UnknownInstance = "Unknown"
+)
+
+type InstanceID string
+
+const (
+ UnNamedInstance InstanceID = "None"
+)
+
+type MachineInfo struct {
+ // The number of cores in this machine.
+ NumCores int `json:"num_cores"`
+
+ // Maximum clock speed for the cores, in KHz.
+ CpuFrequency uint64 `json:"cpu_frequency_khz"`
+
+ // The amount of memory (in bytes) in this machine
+ MemoryCapacity uint64 `json:"memory_capacity"`
+
+ // The machine id
+ MachineID string `json:"machine_id"`
+
+ // The system uuid
+ SystemUUID string `json:"system_uuid"`
+
+ // The boot id
+ BootID string `json:"boot_id"`
+
+ // Filesystems on this machine.
+ Filesystems []FsInfo `json:"filesystems"`
+
+ // Disk map
+ DiskMap map[string]DiskInfo `json:"disk_map"`
+
+ // Network devices
+ NetworkDevices []NetInfo `json:"network_devices"`
+
+ // Machine Topology
+ // Describes cpu/memory layout and hierarchy.
+ Topology []Node `json:"topology"`
+
+ // Cloud provider the machine belongs to.
+ CloudProvider CloudProvider `json:"cloud_provider"`
+
+ // Type of cloud instance (e.g. GCE standard) the machine is.
+ InstanceType InstanceType `json:"instance_type"`
+
+ // ID of cloud instance (e.g. instance-1) given to it by the cloud provider.
+ InstanceID InstanceID `json:"instance_id"`
+}
+
+type VersionInfo struct {
+ // Kernel version.
+ KernelVersion string `json:"kernel_version"`
+
+ // OS image being used for cadvisor container, or host image if running on host directly.
+ ContainerOsVersion string `json:"container_os_version"`
+
+ // Docker version.
+ DockerVersion string `json:"docker_version"`
+
+ // cAdvisor version.
+ CadvisorVersion string `json:"cadvisor_version"`
+ // cAdvisor git revision.
+ CadvisorRevision string `json:"cadvisor_revision"`
+}
+
+type MachineInfoFactory interface {
+ GetMachineInfo() (*MachineInfo, error)
+ GetVersionInfo() (*VersionInfo, error)
+}
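A short, hypothetical sketch of the Node helpers above (AddThread creating cores on demand, the -1 fallback, and FindCore), again assuming the upstream import path:

```
package main

import (
	"fmt"

	info "github.com/google/cadvisor/info/v1"
)

func main() {
	node := info.Node{Id: 0, Memory: 8 << 30}

	// Two hyperthreads on core 0; the core is created on first use.
	node.AddThread(0, 0)
	node.AddThread(1, 0)

	// Unknown core (-1) falls back to one core per thread.
	node.AddThread(2, -1)

	// A node-level cache shared by all cores on this node.
	node.AddNodeCache(info.Cache{Size: 8 << 20, Type: "Unified", Level: 3})

	ok, idx := node.FindCore(0)
	fmt.Println(ok, idx, len(node.Cores)) // true 0 2
}
```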
diff --git a/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/metric.go b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/metric.go
new file mode 100644
index 0000000..90fd9e4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/metric.go
@@ -0,0 +1,79 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "time"
+)
+
+// Type of metric being exported.
+type MetricType string
+
+const (
+ // Instantaneous value. May increase or decrease.
+ MetricGauge MetricType = "gauge"
+
+ // A counter-like value that is only expected to increase.
+ MetricCumulative = "cumulative"
+
+ // Rate over a time period.
+ MetricDelta = "delta"
+)
+
+// DataType for metric being exported.
+type DataType string
+
+const (
+ IntType DataType = "int"
+ FloatType = "float"
+)
+
+// Spec for custom metric.
+type MetricSpec struct {
+ // The name of the metric.
+ Name string `json:"name"`
+
+ // Type of the metric.
+ Type MetricType `json:"type"`
+
+ // Data Type for the stats.
+ Format DataType `json:"format"`
+
+ // Display Units for the stats.
+ Units string `json:"units"`
+}
+
+// An exported metric.
+type MetricValBasic struct {
+ // Time at which the metric was queried
+ Timestamp time.Time `json:"timestamp"`
+
+ // The value of the metric at this point.
+ IntValue int64 `json:"int_value,omitempty"`
+ FloatValue float64 `json:"float_value,omitempty"`
+}
+
+// An exported metric.
+type MetricVal struct {
+ // Label associated with a metric
+ Label string `json:"label,omitempty"`
+
+ // Time at which the metric was queried
+ Timestamp time.Time `json:"timestamp"`
+
+ // The value of the metric at this point.
+ IntValue int64 `json:"int_value,omitempty"`
+ FloatValue float64 `json:"float_value,omitempty"`
+}
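A small, hypothetical sketch of how a custom metric is described with MetricSpec and reported as MetricVal samples, keyed by name as in ContainerStats.CustomMetrics (same assumed import path as above):

```
package main

import (
	"fmt"
	"time"

	info "github.com/google/cadvisor/info/v1"
)

func main() {
	// Spec describing a custom gauge exported as a float.
	spec := info.MetricSpec{
		Name:   "queue_depth",
		Type:   info.MetricGauge,
		Format: info.FloatType,
		Units:  "requests",
	}

	// Samples for that metric, keyed by metric name.
	samples := map[string][]info.MetricVal{
		spec.Name: {{Timestamp: time.Now(), FloatValue: 12.5}},
	}
	fmt.Println(spec.Name, len(samples[spec.Name])) // queue_depth 1
}
```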
diff --git a/src/kube2msb/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/src/kube2msb/vendor/github.com/google/gofuzz/CONTRIBUTING.md
new file mode 100644
index 0000000..51cf5cd
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/gofuzz/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement. This is not a copyright **assignment**, it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+ * If you are an individual writing original source code and you're sure you
+ own the intellectual property, then you'll need to sign an [individual
+ CLA][].
+
+ * If you work for a company that wants to allow you to contribute your work,
+ then you'll need to sign a [corporate CLA][].
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+
+## Submitting a patch ##
+
+ 1. It's generally best to start by opening a new issue describing the bug or
+ feature you're intending to fix. Even if you think it's relatively minor,
+ it's helpful to know what people are working on. Mention in the initial
+ issue that you are planning to work on that bug or feature so that it can
+ be assigned to you.
+
+ 1. Follow the normal process of [forking][] the project, and set up a new
+ branch to work in. It's important that each group of changes be done in
+ separate branches in order to ensure that a pull request only includes the
+ commits related to that bug or feature.
+
+ 1. Go makes it very simple to ensure properly formatted code, so always run
+ `go fmt` on your code before committing it. You should also run
+ [golint][] over your code. As noted in the [golint readme][], it's not
+ strictly necessary that your code be completely "lint-free", but this will
+ help you find common style issues.
+
+ 1. Any significant changes should almost always be accompanied by tests. The
+ project already has good test coverage, so look at some of the existing
+ tests if you're unsure how to go about it. [gocov][] and [gocov-html][]
+ are invaluable tools for seeing which parts of your code aren't being
+ exercised by your tests.
+
+ 1. Do your best to have [well-formed commit messages][] for each change.
+ This provides consistency throughout the project, and ensures that commit
+ messages are able to be formatted properly by various git tools.
+
+ 1. Finally, push the commits to your fork and submit a [pull request][].
+
+[forking]: https://help.github.com/articles/fork-a-repo
+[golint]: https://github.com/golang/lint
+[golint readme]: https://github.com/golang/lint/blob/master/README
+[gocov]: https://github.com/axw/gocov
+[gocov-html]: https://github.com/matm/gocov-html
+[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits
+[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/src/kube2msb/vendor/github.com/google/gofuzz/LICENSE b/src/kube2msb/vendor/github.com/google/gofuzz/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/gofuzz/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/google/gofuzz/README.md b/src/kube2msb/vendor/github.com/google/gofuzz/README.md
new file mode 100644
index 0000000..68fcf2c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/gofuzz/README.md
@@ -0,0 +1,71 @@
+gofuzz
+======
+
+gofuzz is a library for populating go objects with random values.
+
+[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz)
+[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz)
+
+This is useful for testing:
+
+* Do your project's objects really serialize/unserialize correctly in all cases?
+* Is there an incorrectly formatted object that will cause your project to panic?
+
+Import with ```import "github.com/google/gofuzz"```
+
+You can use it on single variables:
+```
+f := fuzz.New()
+var myInt int
+f.Fuzz(&myInt) // myInt gets a random value.
+```
+
+You can use it on maps:
+```
+f := fuzz.New().NilChance(0).NumElements(1, 1)
+var myMap map[ComplexKeyType]string
+f.Fuzz(&myMap) // myMap will have exactly one element.
+```
+
+Customize the chance of getting a nil pointer:
+```
+f := fuzz.New().NilChance(.5)
+var fancyStruct struct {
+ A, B, C, D *string
+}
+f.Fuzz(&fancyStruct) // About half the pointers should be set.
+```
+
+You can even customize the randomization completely if needed:
+```
+type MyEnum string
+const (
+ A MyEnum = "A"
+ B MyEnum = "B"
+)
+type MyInfo struct {
+ Type MyEnum
+ AInfo *string
+ BInfo *string
+}
+
+f := fuzz.New().NilChance(0).Funcs(
+ func(e *MyInfo, c fuzz.Continue) {
+ switch c.Intn(2) {
+ case 0:
+ e.Type = A
+ c.Fuzz(&e.AInfo)
+ case 1:
+ e.Type = B
+ c.Fuzz(&e.BInfo)
+ }
+ },
+)
+
+var myObject MyInfo
+f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
+```
+
+See more examples in ```example_test.go```.
+
+Happy testing!
diff --git a/src/kube2msb/vendor/github.com/google/gofuzz/doc.go b/src/kube2msb/vendor/github.com/google/gofuzz/doc.go
new file mode 100644
index 0000000..9f9956d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/gofuzz/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fuzz is a library for populating go objects with random values.
+package fuzz
diff --git a/src/kube2msb/vendor/github.com/google/gofuzz/fuzz.go b/src/kube2msb/vendor/github.com/google/gofuzz/fuzz.go
new file mode 100644
index 0000000..42d9a48
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/gofuzz/fuzz.go
@@ -0,0 +1,446 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fuzz
+
+import (
+ "fmt"
+ "math/rand"
+ "reflect"
+ "time"
+)
+
+// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
+type fuzzFuncMap map[reflect.Type]reflect.Value
+
+// Fuzzer knows how to fill any object with random fields.
+type Fuzzer struct {
+ fuzzFuncs fuzzFuncMap
+ defaultFuzzFuncs fuzzFuncMap
+ r *rand.Rand
+ nilChance float64
+ minElements int
+ maxElements int
+}
+
+// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
+// RandSource, NilChance, or NumElements in any order.
+func New() *Fuzzer {
+ f := &Fuzzer{
+ defaultFuzzFuncs: fuzzFuncMap{
+ reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
+ },
+
+ fuzzFuncs: fuzzFuncMap{},
+ r: rand.New(rand.NewSource(time.Now().UnixNano())),
+ nilChance: .2,
+ minElements: 1,
+ maxElements: 10,
+ }
+ return f
+}
+
+// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
+//
+// Each entry in fuzzFuncs must be a function taking two parameters.
+// The first parameter must be a pointer or map. It is the variable that
+// function will fill with random data. The second parameter must be a
+// fuzz.Continue, which will provide a source of randomness and a way
+// to automatically continue fuzzing smaller pieces of the first parameter.
+//
+// These functions are called sensibly, e.g., if you wanted custom string
+// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
+// called and passed the address of strings. Maps and pointers will always
+// be made/new'd for you, ignoring the NilChance option. For slices, it
+// doesn't make much sense to pre-create them--Fuzzer doesn't know how
+// long you want your slice--so take a pointer to a slice, and make it
+// yourself. (If you don't want your map/pointer type pre-made, take a
+// pointer to it, and make it yourself.) See the examples for a range of
+// custom functions.
+func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer {
+ for i := range fuzzFuncs {
+ v := reflect.ValueOf(fuzzFuncs[i])
+ if v.Kind() != reflect.Func {
+ panic("Need only funcs!")
+ }
+ t := v.Type()
+ if t.NumIn() != 2 || t.NumOut() != 0 {
+ panic("Need 2 in and 0 out params!")
+ }
+ argT := t.In(0)
+ switch argT.Kind() {
+ case reflect.Ptr, reflect.Map:
+ default:
+ panic("fuzzFunc must take pointer or map type")
+ }
+ if t.In(1) != reflect.TypeOf(Continue{}) {
+ panic("fuzzFunc's second parameter must be type fuzz.Continue")
+ }
+ f.fuzzFuncs[argT] = v
+ }
+ return f
+}
+
+// RandSource causes f to get values from the given source of randomness.
+// Use if you want deterministic fuzzing.
+func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer {
+ f.r = rand.New(s)
+ return f
+}
+
+// NilChance sets the probability of creating a nil pointer, map, or slice to
+// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
+func (f *Fuzzer) NilChance(p float64) *Fuzzer {
+ if p < 0 || p > 1 {
+ panic("p should be between 0 and 1, inclusive.")
+ }
+ f.nilChance = p
+ return f
+}
+
+// NumElements sets the minimum and maximum number of elements that will be
+// added to a non-nil map or slice.
+func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer {
+ if atLeast > atMost {
+ panic("atLeast must be <= atMost")
+ }
+ if atLeast < 0 {
+ panic("atLeast must be >= 0")
+ }
+ f.minElements = atLeast
+ f.maxElements = atMost
+ return f
+}
+
+func (f *Fuzzer) genElementCount() int {
+ if f.minElements == f.maxElements {
+ return f.minElements
+ }
+ return f.minElements + f.r.Intn(f.maxElements-f.minElements)
+}
+
+func (f *Fuzzer) genShouldFill() bool {
+ return f.r.Float64() > f.nilChance
+}
+
+// Fuzz recursively fills all of obj's fields with something random. First
+// this tries to find a custom fuzz function (see Funcs). If there is no
+// custom function this tests whether the object implements fuzz.Interface and,
+// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if
+// there is a default fuzz function provided by this package. If all of that
+// fails, this will generate random values for all primitive fields and then
+// recurse for all non-primitives.
+//
+// Not safe for cyclic or tree-like structs!
+//
+// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
+// Intended for tests, so will panic on bad input or unimplemented fields.
+func (f *Fuzzer) Fuzz(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ f.doFuzz(v, 0)
+}
+
+// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
+// obj's type will not be called and obj will not be tested for fuzz.Interface
+// conformance. This applies only to obj and not other instances of obj's
+// type.
+// Not safe for cyclic or tree-like structs!
+// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
+// Intended for tests, so will panic on bad input or unimplemented fields.
+func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ f.doFuzz(v, flagNoCustomFuzz)
+}
+
+const (
+ // Do not try to find a custom fuzz function. Does not apply recursively.
+ flagNoCustomFuzz uint64 = 1 << iota
+)
+
+func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) {
+ if !v.CanSet() {
+ return
+ }
+
+ if flags&flagNoCustomFuzz == 0 {
+ // Check for both pointer and non-pointer custom functions.
+ if v.CanAddr() && f.tryCustom(v.Addr()) {
+ return
+ }
+ if f.tryCustom(v) {
+ return
+ }
+ }
+
+ if fn, ok := fillFuncMap[v.Kind()]; ok {
+ fn(v, f.r)
+ return
+ }
+ switch v.Kind() {
+ case reflect.Map:
+ if f.genShouldFill() {
+ v.Set(reflect.MakeMap(v.Type()))
+ n := f.genElementCount()
+ for i := 0; i < n; i++ {
+ key := reflect.New(v.Type().Key()).Elem()
+ f.doFuzz(key, 0)
+ val := reflect.New(v.Type().Elem()).Elem()
+ f.doFuzz(val, 0)
+ v.SetMapIndex(key, val)
+ }
+ return
+ }
+ v.Set(reflect.Zero(v.Type()))
+ case reflect.Ptr:
+ if f.genShouldFill() {
+ v.Set(reflect.New(v.Type().Elem()))
+ f.doFuzz(v.Elem(), 0)
+ return
+ }
+ v.Set(reflect.Zero(v.Type()))
+ case reflect.Slice:
+ if f.genShouldFill() {
+ n := f.genElementCount()
+ v.Set(reflect.MakeSlice(v.Type(), n, n))
+ for i := 0; i < n; i++ {
+ f.doFuzz(v.Index(i), 0)
+ }
+ return
+ }
+ v.Set(reflect.Zero(v.Type()))
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ f.doFuzz(v.Field(i), 0)
+ }
+ case reflect.Array:
+ fallthrough
+ case reflect.Chan:
+ fallthrough
+ case reflect.Func:
+ fallthrough
+ case reflect.Interface:
+ fallthrough
+ default:
+ panic(fmt.Sprintf("Can't handle %#v", v.Interface()))
+ }
+}
+
+// tryCustom searches for custom handlers, and returns true iff it finds a match
+// and successfully randomizes v.
+func (f *Fuzzer) tryCustom(v reflect.Value) bool {
+ // First: see if we have a fuzz function for it.
+ doCustom, ok := f.fuzzFuncs[v.Type()]
+ if !ok {
+ // Second: see if it can fuzz itself.
+ if v.CanInterface() {
+ intf := v.Interface()
+ if fuzzable, ok := intf.(Interface); ok {
+ fuzzable.Fuzz(Continue{f: f, Rand: f.r})
+ return true
+ }
+ }
+ // Finally: see if there is a default fuzz function.
+ doCustom, ok = f.defaultFuzzFuncs[v.Type()]
+ if !ok {
+ return false
+ }
+ }
+
+ switch v.Kind() {
+ case reflect.Ptr:
+ if v.IsNil() {
+ if !v.CanSet() {
+ return false
+ }
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ case reflect.Map:
+ if v.IsNil() {
+ if !v.CanSet() {
+ return false
+ }
+ v.Set(reflect.MakeMap(v.Type()))
+ }
+ default:
+ return false
+ }
+
+ doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
+ f: f,
+ Rand: f.r,
+ })})
+ return true
+}
+
+// Interface represents an object that knows how to fuzz itself. Any time we
+// find a type that implements this interface we will delegate the act of
+// fuzzing itself.
+type Interface interface {
+ Fuzz(c Continue)
+}
+
+// Continue can be passed to custom fuzzing functions to allow them to use
+// the correct source of randomness and to continue fuzzing their members.
+type Continue struct {
+ f *Fuzzer
+
+ // For convenience, Continue implements rand.Rand via embedding.
+ // Use this for generating any randomness if you want your fuzzing
+ // to be repeatable for a given seed.
+ *rand.Rand
+}
+
+// Fuzz continues fuzzing obj. obj must be a pointer.
+func (c Continue) Fuzz(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ c.f.doFuzz(v, 0)
+}
+
+// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
+// obj's type will not be called and obj will not be tested for fuzz.Interface
+// conformance. This applies only to obj and not other instances of obj's
+// type.
+func (c Continue) FuzzNoCustom(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ c.f.doFuzz(v, flagNoCustomFuzz)
+}
+
+// RandString makes a random string up to 20 characters long. The returned string
+// may include a variety of (valid) UTF-8 encodings.
+func (c Continue) RandString() string {
+ return randString(c.Rand)
+}
+
+// RandUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func (c Continue) RandUint64() uint64 {
+ return randUint64(c.Rand)
+}
+
+// RandBool returns true or false randomly.
+func (c Continue) RandBool() bool {
+ return randBool(c.Rand)
+}
+
+func fuzzInt(v reflect.Value, r *rand.Rand) {
+ v.SetInt(int64(randUint64(r)))
+}
+
+func fuzzUint(v reflect.Value, r *rand.Rand) {
+ v.SetUint(randUint64(r))
+}
+
+func fuzzTime(t *time.Time, c Continue) {
+ var sec, nsec int64
+ // Allow for about 1000 years of random time values, which keeps things
+ // like JSON parsing reasonably happy.
+ sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
+ c.Fuzz(&nsec)
+ *t = time.Unix(sec, nsec)
+}
+
+var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
+ reflect.Bool: func(v reflect.Value, r *rand.Rand) {
+ v.SetBool(randBool(r))
+ },
+ reflect.Int: fuzzInt,
+ reflect.Int8: fuzzInt,
+ reflect.Int16: fuzzInt,
+ reflect.Int32: fuzzInt,
+ reflect.Int64: fuzzInt,
+ reflect.Uint: fuzzUint,
+ reflect.Uint8: fuzzUint,
+ reflect.Uint16: fuzzUint,
+ reflect.Uint32: fuzzUint,
+ reflect.Uint64: fuzzUint,
+ reflect.Uintptr: fuzzUint,
+ reflect.Float32: func(v reflect.Value, r *rand.Rand) {
+ v.SetFloat(float64(r.Float32()))
+ },
+ reflect.Float64: func(v reflect.Value, r *rand.Rand) {
+ v.SetFloat(r.Float64())
+ },
+ reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
+ panic("unimplemented")
+ },
+ reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
+ panic("unimplemented")
+ },
+ reflect.String: func(v reflect.Value, r *rand.Rand) {
+ v.SetString(randString(r))
+ },
+ reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
+ panic("unimplemented")
+ },
+}
+
+// randBool returns true or false randomly.
+func randBool(r *rand.Rand) bool {
+ if r.Int()&1 == 1 {
+ return true
+ }
+ return false
+}
+
+type charRange struct {
+ first, last rune
+}
+
+// choose returns a random unicode character from the given range, using the
+// given randomness source.
+func (r *charRange) choose(rand *rand.Rand) rune {
+ count := int64(r.last - r.first)
+ return r.first + rune(rand.Int63n(count))
+}
+
+var unicodeRanges = []charRange{
+ {' ', '~'}, // ASCII characters
+ {'\u00a0', '\u02af'}, // Multi-byte encoded characters
+ {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
+}
+
+// randString makes a random string up to 20 characters long. The returned string
+// may include a variety of (valid) UTF-8 encodings.
+func randString(r *rand.Rand) string {
+ n := r.Intn(20)
+ runes := make([]rune, n)
+ for i := range runes {
+ runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r)
+ }
+ return string(runes)
+}
+
+// randUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func randUint64(r *rand.Rand) uint64 {
+ return uint64(r.Uint32())<<32 | uint64(r.Uint32())
+}
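A minimal, hypothetical sketch of the Fuzzer API above, using RandSource for reproducible output together with the NilChance and NumElements builders (assuming the vendored copy is importable as github.com/google/gofuzz):

```
package main

import (
	"fmt"
	"math/rand"

	fuzz "github.com/google/gofuzz"
)

type config struct {
	Name    string
	Retries int
	Labels  map[string]string
}

func main() {
	// A fixed seed makes the generated values reproducible across runs.
	f := fuzz.New().
		RandSource(rand.NewSource(42)).
		NilChance(0).
		NumElements(1, 3)

	var c config
	f.Fuzz(&c)
	fmt.Printf("%+v\n", c) // all exported fields populated
}
```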
diff --git a/src/kube2msb/vendor/github.com/imdario/mergo/LICENSE b/src/kube2msb/vendor/github.com/imdario/mergo/LICENSE
new file mode 100644
index 0000000..6866802
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/imdario/mergo/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/kube2msb/vendor/github.com/imdario/mergo/README.md b/src/kube2msb/vendor/github.com/imdario/mergo/README.md
new file mode 100644
index 0000000..cdcea0f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/imdario/mergo/README.md
@@ -0,0 +1,68 @@
+# Mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche.
+
+![Mergo dall'alto](http://www.comune.mergo.an.it/Siti/Mergo/Immagini/Foto/mergo_dall_alto.jpg)
+
+## Status
+
+It is ready for production use. It works fine, although it could use more testing. Here are some projects in the wild using Mergo:
+
+- [EagerIO/Stout](https://github.com/EagerIO/Stout)
+- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
+- [russross/canvasassignments](https://github.com/russross/canvasassignments)
+- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
+- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
+- [divshot/gitling](https://github.com/divshot/gitling)
+- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
+
+[![Build Status][1]][2]
+[![GoDoc](https://godoc.org/github.com/imdario/mergo?status.svg)](https://godoc.org/github.com/imdario/mergo)
+
+[1]: https://travis-ci.org/imdario/mergo.png
+[2]: https://travis-ci.org/imdario/mergo
+
+## Installation
+
+ go get github.com/imdario/mergo
+
+ // use in your .go code
+ import (
+ "github.com/imdario/mergo"
+ )
+
+## Usage
+
+You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields, but it will recurse into any exported one. Maps are also merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+ if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+ }
+
+Additionally, you can map a map[string]interface{} to a struct (and vice versa, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+ if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+ }
+
+Warning: mapping a struct to a map is not done recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}; they are simply assigned as values.
+
+More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
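+
+As a rough end-to-end sketch, here is how Merge and Map fit together (the Config type, its fields and the values below are made up for illustration; they are not part of Mergo):
+
+    package main
+
+    import (
+        "fmt"
+        "log"
+
+        "github.com/imdario/mergo"
+    )
+
+    type Config struct {
+        Host string
+        Port int
+    }
+
+    func main() {
+        defaults := Config{Host: "localhost", Port: 8080}
+
+        // Fill zero-value fields of cfg from defaults.
+        cfg := Config{Port: 9090}
+        if err := mergo.Merge(&cfg, defaults); err != nil {
+            log.Fatal(err)
+        }
+        fmt.Println(cfg.Host) // "localhost": the field was empty and got filled in
+
+        // Map a map[string]interface{} onto a struct; keys are capitalized
+        // to find the matching exported fields.
+        var fromMap Config
+        if err := mergo.Map(&fromMap, map[string]interface{}{"host": "example.org"}); err != nil {
+            log.Fatal(err)
+        }
+        fmt.Println(fromMap.Host) // "example.org"
+    }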
+
+Note: if tests are failing due to a missing package, please execute:
+
+ go get gopkg.in/yaml.v1
+
+## Contact me
+
+If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
diff --git a/src/kube2msb/vendor/github.com/imdario/mergo/doc.go b/src/kube2msb/vendor/github.com/imdario/mergo/doc.go
new file mode 100644
index 0000000..6e9aa7b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/imdario/mergo/doc.go
@@ -0,0 +1,44 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mergo merges same-type structs and maps by setting default values in zero-value fields.
+
+Mergo won't merge unexported (private) fields, but it recurses into any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Usage
+
+From my own work-in-progress project:
+
+ type networkConfig struct {
+ Protocol string
+ Address string
+ ServerType string `json:"server_type"`
+ Port uint16
+ }
+
+ type FssnConfig struct {
+ Network networkConfig
+ }
+
+ var fssnDefault = FssnConfig {
+ networkConfig {
+ "tcp",
+ "127.0.0.1",
+ "http",
+ 31560,
+ },
+ }
+
+ // Inside a function [...]
+
+ if err := mergo.Merge(&config, fssnDefault); err != nil {
+ log.Fatal(err)
+ }
+
+ // More code [...]
+
+*/
+package mergo
diff --git a/src/kube2msb/vendor/github.com/imdario/mergo/map.go b/src/kube2msb/vendor/github.com/imdario/mergo/map.go
new file mode 100644
index 0000000..44361e8
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/imdario/mergo/map.go
@@ -0,0 +1,146 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+func changeInitialCase(s string, mapper func(rune) rune) string {
+ if s == "" {
+ return s
+ }
+ r, n := utf8.DecodeRuneInString(s)
+ return string(mapper(r)) + s[n:]
+}
+
+func isExported(field reflect.StructField) bool {
+ r, _ := utf8.DecodeRuneInString(field.Name)
+ return r >= 'A' && r <= 'Z'
+}
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ zeroValue := reflect.Value{}
+ switch dst.Kind() {
+ case reflect.Map:
+ dstMap := dst.Interface().(map[string]interface{})
+ for i, n := 0, src.NumField(); i < n; i++ {
+ srcType := src.Type()
+ field := srcType.Field(i)
+ if !isExported(field) {
+ continue
+ }
+ fieldName := field.Name
+ fieldName = changeInitialCase(fieldName, unicode.ToLower)
+ if v, ok := dstMap[fieldName]; !ok || isEmptyValue(reflect.ValueOf(v)) {
+ dstMap[fieldName] = src.Field(i).Interface()
+ }
+ }
+ case reflect.Struct:
+ srcMap := src.Interface().(map[string]interface{})
+ for key := range srcMap {
+ srcValue := srcMap[key]
+ fieldName := changeInitialCase(key, unicode.ToUpper)
+ dstElement := dst.FieldByName(fieldName)
+ if dstElement == zeroValue {
+ // We discard it because the field doesn't exist.
+ continue
+ }
+ srcElement := reflect.ValueOf(srcValue)
+ dstKind := dstElement.Kind()
+ srcKind := srcElement.Kind()
+ if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+ srcElement = srcElement.Elem()
+ srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+ } else if dstKind == reflect.Ptr {
+ // Can this work? I guess it can't.
+ if srcKind != reflect.Ptr && srcElement.CanAddr() {
+ srcPtr := srcElement.Addr()
+ srcElement = reflect.ValueOf(srcPtr)
+ srcKind = reflect.Ptr
+ }
+ }
+ if !srcElement.IsValid() {
+ continue
+ }
+ if srcKind == dstKind {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1); err != nil {
+ return
+ }
+ } else {
+ if srcKind == reflect.Map {
+ if err = deepMap(dstElement, srcElement, visited, depth+1); err != nil {
+ return
+ }
+ } else {
+ return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+ }
+ }
+ }
+ }
+ return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to struct. If src is a struct,
+// dst must be map[string]interface{}.
+// It won't merge unexported (private) fields and recurses into
+// any exported field.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// A key in src that doesn't match an exported field in dst will be skipped. This
+// doesn't apply if dst is a map.
+// This is a separate method from Merge because it is cleaner and it keeps sane
+// semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}) error {
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ // To be friction-less, we redirect equal-type arguments
+ // to deepMerge. Only because arguments can be anything.
+ if vSrc.Kind() == vDst.Kind() {
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0)
+ }
+ switch vSrc.Kind() {
+ case reflect.Struct:
+ if vDst.Kind() != reflect.Map {
+ return ErrExpectedMapAsDestination
+ }
+ case reflect.Map:
+ if vDst.Kind() != reflect.Struct {
+ return ErrExpectedStructAsDestination
+ }
+ default:
+ return ErrNotSupported
+ }
+ return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0)
+}
diff --git a/src/kube2msb/vendor/github.com/imdario/mergo/merge.go b/src/kube2msb/vendor/github.com/imdario/mergo/merge.go
new file mode 100644
index 0000000..5d328b1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/imdario/mergo/merge.go
@@ -0,0 +1,99 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "reflect"
+)
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
+ if !src.IsValid() {
+ return
+ }
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ switch dst.Kind() {
+ case reflect.Struct:
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1); err != nil {
+ return
+ }
+ }
+ case reflect.Map:
+ for _, key := range src.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ continue
+ }
+ dstElement := dst.MapIndex(key)
+ switch reflect.TypeOf(srcElement.Interface()).Kind() {
+ case reflect.Struct:
+ fallthrough
+ case reflect.Map:
+ if err = deepMerge(dstElement, srcElement, visited, depth+1); err != nil {
+ return
+ }
+ }
+ if !dstElement.IsValid() {
+ dst.SetMapIndex(key, srcElement)
+ }
+ }
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Interface:
+ if src.IsNil() {
+ break
+ } else if dst.IsNil() {
+ if dst.CanSet() && isEmptyValue(dst) {
+ dst.Set(src)
+ }
+ } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1); err != nil {
+ return
+ }
+ default:
+ if dst.CanSet() && !isEmptyValue(src) {
+ dst.Set(src)
+ }
+ }
+ return
+}
+
+// Merge sets fields' values in dst from src if they have a zero
+// value of their type.
+// dst and src must be valid same-type structs and dst must be
+// a pointer to struct.
+// It won't merge unexported (private) fields and recurses into
+// any exported field.
+func Merge(dst, src interface{}) error {
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ if vDst.Type() != vSrc.Type() {
+ return ErrDifferentArgumentsTypes
+ }
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0)
+}
diff --git a/src/kube2msb/vendor/github.com/imdario/mergo/mergo.go b/src/kube2msb/vendor/github.com/imdario/mergo/mergo.go
new file mode 100644
index 0000000..f8a0991
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/imdario/mergo/mergo.go
@@ -0,0 +1,90 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "errors"
+ "reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+ ErrNilArguments = errors.New("src and dst must not be nil")
+ ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
+ ErrNotSupported = errors.New("only structs and maps are supported")
+ ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
+ ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+)
+
+// During deepMerge, we must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited entries are stored in a map keyed by 17 * addr.
+type visit struct {
+ ptr uintptr
+ typ reflect.Type
+ next *visit
+}
+
+// From src/pkg/encoding/json.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
+ if dst == nil || src == nil {
+ err = ErrNilArguments
+ return
+ }
+ vDst = reflect.ValueOf(dst).Elem()
+ if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+ err = ErrNotSupported
+ return
+ }
+ vSrc = reflect.ValueOf(src)
+ // We check if vSrc is a pointer to dereference it.
+ if vSrc.Kind() == reflect.Ptr {
+ vSrc = vSrc.Elem()
+ }
+ return
+}
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ return // TODO refactor
+}
diff --git a/src/kube2msb/vendor/github.com/jonboulle/clockwork/LICENSE b/src/kube2msb/vendor/github.com/jonboulle/clockwork/LICENSE
new file mode 100644
index 0000000..5c304d1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/jonboulle/clockwork/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/jonboulle/clockwork/README.md b/src/kube2msb/vendor/github.com/jonboulle/clockwork/README.md
new file mode 100644
index 0000000..d43a6c7
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/jonboulle/clockwork/README.md
@@ -0,0 +1,61 @@
+clockwork
+=========
+
+[![Build Status](https://travis-ci.org/jonboulle/clockwork.png?branch=master)](https://travis-ci.org/jonboulle/clockwork)
+[![godoc](https://godoc.org/github.com/jonboulle/clockwork?status.svg)](http://godoc.org/github.com/jonboulle/clockwork)
+
+a simple fake clock for golang
+
+# Usage
+
+Replace uses of the `time` package with the `clockwork.Clock` interface instead.
+
+For example, instead of using `time.Sleep` directly:
+
+```
+func my_func() {
+ time.Sleep(3 * time.Second)
+ do_something()
+}
+```
+
+inject a clock and use its `Sleep` method instead:
+
+```
+func my_func(clock clockwork.Clock) {
+ clock.Sleep(3 * time.Second)
+ do_something()
+}
+```
+
+Now you can easily test `my_func` with a `FakeClock`:
+
+```
+func TestMyFunc(t *testing.T) {
+ c := clockwork.NewFakeClock()
+
+ // Start our sleepy function in a goroutine so the test can continue
+ go my_func(c)
+
+ // Ensure we wait until my_func is sleeping
+ c.BlockUntil(1)
+
+ assert_state()
+
+ // Advance the FakeClock forward in time, waking the sleeper
+ c.Advance(3 * time.Second)
+
+ assert_state()
+}
+```
+
+and in production builds, simply inject the real clock instead:
+```
+my_func(clockwork.NewRealClock())
+```
+
+See [example_test.go](example_test.go) for a full example.
+
+# Credits
+
+clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking time)
diff --git a/src/kube2msb/vendor/github.com/jonboulle/clockwork/clockwork.go b/src/kube2msb/vendor/github.com/jonboulle/clockwork/clockwork.go
new file mode 100644
index 0000000..1f1045b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/jonboulle/clockwork/clockwork.go
@@ -0,0 +1,164 @@
+package clockwork
+
+import (
+ "sync"
+ "time"
+)
+
+// Clock provides an interface that packages can use instead of directly
+// using the time package, so that chronology-related behavior can be tested.
+type Clock interface {
+ After(d time.Duration) <-chan time.Time
+ Sleep(d time.Duration)
+ Now() time.Time
+}
+
+// FakeClock provides an interface for a clock which can be
+// manually advanced through time
+type FakeClock interface {
+ Clock
+ // Advance advances the FakeClock to a new point in time, ensuring any existing
+ // sleepers are notified appropriately before returning
+ Advance(d time.Duration)
+ // BlockUntil will block until the FakeClock has the given number of
+ // sleepers (callers of Sleep or After)
+ BlockUntil(n int)
+}
+
+// NewRealClock returns a Clock which simply delegates calls to the actual time
+// package; it should be used by packages in production.
+func NewRealClock() Clock {
+ return &realClock{}
+}
+
+// NewFakeClock returns a FakeClock implementation which can be
+// manually advanced through time for testing.
+func NewFakeClock() FakeClock {
+ return &fakeClock{
+ l: sync.RWMutex{},
+
+ // use a fixture that does not fulfill Time.IsZero()
+ time: time.Date(1900, time.January, 1, 0, 0, 0, 0, time.UTC),
+ }
+}
+
+type realClock struct{}
+
+func (rc *realClock) After(d time.Duration) <-chan time.Time {
+ return time.After(d)
+}
+
+func (rc *realClock) Sleep(d time.Duration) {
+ time.Sleep(d)
+}
+
+func (rc *realClock) Now() time.Time {
+ return time.Now()
+}
+
+type fakeClock struct {
+ sleepers []*sleeper
+ blockers []*blocker
+ time time.Time
+
+ l sync.RWMutex
+}
+
+// sleeper represents a caller of After or Sleep
+type sleeper struct {
+ until time.Time
+ done chan time.Time
+}
+
+// blocker represents a caller of BlockUntil
+type blocker struct {
+ count int
+ ch chan struct{}
+}
+
+// After mimics time.After; it waits for the given duration to elapse on the
+// fakeClock, then sends the current time on the returned channel.
+func (fc *fakeClock) After(d time.Duration) <-chan time.Time {
+ fc.l.Lock()
+ defer fc.l.Unlock()
+ now := fc.time
+ done := make(chan time.Time, 1)
+ if d.Nanoseconds() == 0 {
+ // special case - trigger immediately
+ done <- now
+ } else {
+ // otherwise, add to the set of sleepers
+ s := &sleeper{
+ until: now.Add(d),
+ done: done,
+ }
+ fc.sleepers = append(fc.sleepers, s)
+ // and notify any blockers
+ fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
+ }
+ return done
+}
+
+// notifyBlockers notifies all the blockers waiting until the
+// given number of sleepers are waiting on the fakeClock. It
+// returns an updated slice of blockers (i.e. those still waiting)
+func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) {
+ for _, b := range blockers {
+ if b.count == count {
+ close(b.ch)
+ } else {
+ newBlockers = append(newBlockers, b)
+ }
+ }
+ return
+}
+
+// Sleep blocks until the given duration has passed on the fakeClock
+func (fc *fakeClock) Sleep(d time.Duration) {
+ <-fc.After(d)
+}
+
+// Now returns the current time of the fakeClock
+func (fc *fakeClock) Now() time.Time {
+ fc.l.Lock()
+ defer fc.l.Unlock()
+ return fc.time
+}
+
+// Advance advances fakeClock to a new point in time, ensuring channels from any
+// previous invocations of After are notified appropriately before returning
+func (fc *fakeClock) Advance(d time.Duration) {
+ fc.l.Lock()
+ defer fc.l.Unlock()
+ end := fc.time.Add(d)
+ var newSleepers []*sleeper
+ for _, s := range fc.sleepers {
+ if end.Sub(s.until) >= 0 {
+ s.done <- end
+ } else {
+ newSleepers = append(newSleepers, s)
+ }
+ }
+ fc.sleepers = newSleepers
+ fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
+ fc.time = end
+}
+
+// BlockUntil will block until the fakeClock has the given number of sleepers
+// (callers of Sleep or After)
+func (fc *fakeClock) BlockUntil(n int) {
+ fc.l.Lock()
+ // Fast path: current number of sleepers is what we're looking for
+ if len(fc.sleepers) == n {
+ fc.l.Unlock()
+ return
+ }
+ // Otherwise, set up a new blocker
+ b := &blocker{
+ count: n,
+ ch: make(chan struct{}),
+ }
+ fc.blockers = append(fc.blockers, b)
+ fc.l.Unlock()
+ <-b.ch
+}
diff --git a/src/kube2msb/vendor/github.com/juju/ratelimit/LICENSE b/src/kube2msb/vendor/github.com/juju/ratelimit/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/juju/ratelimit/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/src/kube2msb/vendor/github.com/juju/ratelimit/README.md b/src/kube2msb/vendor/github.com/juju/ratelimit/README.md
new file mode 100644
index 0000000..a0fdfe2
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/juju/ratelimit/README.md
@@ -0,0 +1,117 @@
+# ratelimit
+--
+ import "github.com/juju/ratelimit"
+
+The ratelimit package provides an efficient token bucket implementation. See
+http://en.wikipedia.org/wiki/Token_bucket.
+
+## Usage
+
+#### func Reader
+
+```go
+func Reader(r io.Reader, bucket *Bucket) io.Reader
+```
+Reader returns a reader that is rate limited by the given token bucket. Each
+token in the bucket represents one byte.
+
+#### func Writer
+
+```go
+func Writer(w io.Writer, bucket *Bucket) io.Writer
+```
+Writer returns a writer that is rate limited by the given token bucket. Each
+token in the bucket represents one byte.
+
+#### type Bucket
+
+```go
+type Bucket struct {
+}
+```
+
+Bucket represents a token bucket that fills at a predetermined rate. Methods on
+Bucket may be called concurrently.
+
+#### func NewBucket
+
+```go
+func NewBucket(fillInterval time.Duration, capacity int64) *Bucket
+```
+NewBucket returns a new token bucket that fills at the rate of one token every
+fillInterval, up to the given maximum capacity. Both arguments must be positive.
+The bucket is initially full.
+
+#### func NewBucketWithQuantum
+
+```go
+func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket
+```
+NewBucketWithQuantum is similar to NewBucket, but allows the specification of
+the quantum size - quantum tokens are added every fillInterval.
+
+#### func NewBucketWithRate
+
+```go
+func NewBucketWithRate(rate float64, capacity int64) *Bucket
+```
+NewBucketWithRate returns a token bucket that fills the bucket at the rate of
+rate tokens per second up to the given maximum capacity. Because of limited
+clock resolution, at high rates, the actual rate may be up to 1% different from
+the specified rate.
+
+#### func (*Bucket) Rate
+
+```go
+func (tb *Bucket) Rate() float64
+```
+Rate returns the fill rate of the bucket, in tokens per second.
+
+#### func (*Bucket) Take
+
+```go
+func (tb *Bucket) Take(count int64) time.Duration
+```
+Take takes count tokens from the bucket without blocking. It returns the time
+that the caller should wait until the tokens are actually available.
+
+Note that the request is irrevocable - there is no way to return tokens to
+the bucket once this method commits us to taking them.
+
+#### func (*Bucket) TakeAvailable
+
+```go
+func (tb *Bucket) TakeAvailable(count int64) int64
+```
+TakeAvailable takes up to count immediately available tokens from the bucket. It
+returns the number of tokens removed, or zero if there are no available tokens.
+It does not block.
+
+#### func (*Bucket) TakeMaxDuration
+
+```go
+func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool)
+```
+TakeMaxDuration is like Take, except that it will only take tokens from the
+bucket if the wait time for the tokens is no greater than maxWait.
+
+If it would take longer than maxWait for the tokens to become available, it does
+nothing and reports false, otherwise it returns the time that the caller should
+wait until the tokens are actually available, and reports true.
+
+#### func (*Bucket) Wait
+
+```go
+func (tb *Bucket) Wait(count int64)
+```
+Wait takes count tokens from the bucket, waiting until they are available.
+
+#### func (*Bucket) WaitMaxDuration
+
+```go
+func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool
+```
+WaitMaxDuration is like Wait except that it will only take tokens from the
+bucket if it needs to wait for no greater than maxWait. It reports whether any
+tokens have been removed from the bucket. If no tokens have been removed, it
+returns immediately.
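+
+#### Example
+
+A minimal sketch of tying the pieces together (the 100 KB/s figure and the use of os.Stdout are arbitrary choices for illustration):
+
+```go
+package main
+
+import (
+	"io"
+	"os"
+	"strings"
+
+	"github.com/juju/ratelimit"
+)
+
+func main() {
+	// A bucket holding up to 100*1024 tokens, refilled at 100 KB per second.
+	// Each token stands for one byte written.
+	bucket := ratelimit.NewBucketWithRate(100*1024, 100*1024)
+
+	// Writer waits on the bucket before each write, so the copy below
+	// proceeds at roughly 100 KB/s.
+	src := strings.NewReader(strings.Repeat("x", 512*1024))
+	if _, err := io.Copy(ratelimit.Writer(os.Stdout, bucket), src); err != nil {
+		panic(err)
+	}
+}
+```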
diff --git a/src/kube2msb/vendor/github.com/juju/ratelimit/ratelimit.go b/src/kube2msb/vendor/github.com/juju/ratelimit/ratelimit.go
new file mode 100644
index 0000000..3ef32fb
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/juju/ratelimit/ratelimit.go
@@ -0,0 +1,245 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3 with static-linking exception.
+// See LICENCE file for details.
+
+// The ratelimit package provides an efficient token bucket implementation
+// that can be used to limit the rate of arbitrary things.
+// See http://en.wikipedia.org/wiki/Token_bucket.
+package ratelimit
+
+import (
+ "math"
+ "strconv"
+ "sync"
+ "time"
+)
+
+// Bucket represents a token bucket that fills at a predetermined rate.
+// Methods on Bucket may be called concurrently.
+type Bucket struct {
+ startTime time.Time
+ capacity int64
+ quantum int64
+ fillInterval time.Duration
+
+ // The mutex guards the fields following it.
+ mu sync.Mutex
+
+ // avail holds the number of available tokens
+ // in the bucket, as of availTick ticks from startTime.
+ // It will be negative when there are consumers
+ // waiting for tokens.
+ avail int64
+ availTick int64
+}
+
+// NewBucket returns a new token bucket that fills at the
+// rate of one token every fillInterval, up to the given
+// maximum capacity. Both arguments must be
+// positive. The bucket is initially full.
+func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
+ return NewBucketWithQuantum(fillInterval, capacity, 1)
+}
+
+// rateMargin specifies the allowed variance of actual
+// rate from specified rate. 1% seems reasonable.
+const rateMargin = 0.01
+
+// NewBucketWithRate returns a token bucket that fills the bucket
+// at the rate of rate tokens per second up to the given
+// maximum capacity. Because of limited clock resolution,
+// at high rates, the actual rate may be up to 1% different from the
+// specified rate.
+func NewBucketWithRate(rate float64, capacity int64) *Bucket {
+ for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) {
+ fillInterval := time.Duration(1e9 * float64(quantum) / rate)
+ if fillInterval <= 0 {
+ continue
+ }
+ tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
+ if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
+ return tb
+ }
+ }
+ panic("cannot find suitable quantum for " + strconv.FormatFloat(rate, 'g', -1, 64))
+}
+
+// nextQuantum returns the next quantum to try after q.
+// We grow the quantum exponentially, but slowly, so we
+// get a good fit in the lower numbers.
+func nextQuantum(q int64) int64 {
+ q1 := q * 11 / 10
+ if q1 == q {
+ q1++
+ }
+ return q1
+}
+
+// NewBucketWithQuantum is similar to NewBucket, but allows
+// the specification of the quantum size - quantum tokens
+// are added every fillInterval.
+func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
+ if fillInterval <= 0 {
+ panic("token bucket fill interval is not > 0")
+ }
+ if capacity <= 0 {
+ panic("token bucket capacity is not > 0")
+ }
+ if quantum <= 0 {
+ panic("token bucket quantum is not > 0")
+ }
+ return &Bucket{
+ startTime: time.Now(),
+ capacity: capacity,
+ quantum: quantum,
+ avail: capacity,
+ fillInterval: fillInterval,
+ }
+}
+
+// Wait takes count tokens from the bucket, waiting until they are
+// available.
+func (tb *Bucket) Wait(count int64) {
+ if d := tb.Take(count); d > 0 {
+ time.Sleep(d)
+ }
+}
+
+// WaitMaxDuration is like Wait except that it will
+// only take tokens from the bucket if it needs to wait
+// for no greater than maxWait. It reports whether
+// any tokens have been removed from the bucket.
+// If no tokens have been removed, it returns immediately.
+func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool {
+ d, ok := tb.TakeMaxDuration(count, maxWait)
+ if d > 0 {
+ time.Sleep(d)
+ }
+ return ok
+}
+
+const infinityDuration time.Duration = 0x7fffffffffffffff
+
+// Take takes count tokens from the bucket without blocking. It returns
+// the time that the caller should wait until the tokens are actually
+// available.
+//
+// Note that the request is irrevocable - there is no way to return
+// tokens to the bucket once this method commits us to taking them.
+func (tb *Bucket) Take(count int64) time.Duration {
+ d, _ := tb.take(time.Now(), count, infinityDuration)
+ return d
+}
+
+// TakeMaxDuration is like Take, except that
+// it will only take tokens from the bucket if the wait
+// time for the tokens is no greater than maxWait.
+//
+// If it would take longer than maxWait for the tokens
+// to become available, it does nothing and reports false,
+// otherwise it returns the time that the caller should
+// wait until the tokens are actually available, and reports
+// true.
+func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) {
+ return tb.take(time.Now(), count, maxWait)
+}
+
+// TakeAvailable takes up to count immediately available tokens from the
+// bucket. It returns the number of tokens removed, or zero if there are
+// no available tokens. It does not block.
+func (tb *Bucket) TakeAvailable(count int64) int64 {
+ return tb.takeAvailable(time.Now(), count)
+}
+
+// takeAvailable is the internal version of TakeAvailable - it takes the
+// current time as an argument to enable easy testing.
+func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
+ if count <= 0 {
+ return 0
+ }
+ tb.mu.Lock()
+ defer tb.mu.Unlock()
+
+ tb.adjust(now)
+ if tb.avail <= 0 {
+ return 0
+ }
+ if count > tb.avail {
+ count = tb.avail
+ }
+ tb.avail -= count
+ return count
+}
+
+// Available returns the number of available tokens. It will be negative
+// when there are consumers waiting for tokens. Note that if this
+// returns greater than zero, it does not guarantee that calls that take
+// tokens from the bucket will succeed, as the number of available
+// tokens could have changed in the meantime. This method is intended
+// primarily for metrics reporting and debugging.
+func (tb *Bucket) Available() int64 {
+ return tb.available(time.Now())
+}
+
+// available is the internal version of available - it takes the current time as
+// an argument to enable easy testing.
+func (tb *Bucket) available(now time.Time) int64 {
+ tb.mu.Lock()
+ defer tb.mu.Unlock()
+ tb.adjust(now)
+ return tb.avail
+}
+
+// Capacity returns the capacity that the bucket was created with.
+func (tb *Bucket) Capacity() int64 {
+ return tb.capacity
+}
+
+// Rate returns the fill rate of the bucket, in tokens per second.
+func (tb *Bucket) Rate() float64 {
+ return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
+}
+
+// take is the internal version of Take - it takes the current time as
+// an argument to enable easy testing.
+func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.Duration, bool) {
+ if count <= 0 {
+ return 0, true
+ }
+ tb.mu.Lock()
+ defer tb.mu.Unlock()
+
+ currentTick := tb.adjust(now)
+ avail := tb.avail - count
+ if avail >= 0 {
+ tb.avail = avail
+ return 0, true
+ }
+ // Round up the missing tokens to the nearest multiple
+ // of quantum - the tokens won't be available until
+ // that tick.
+ endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum
+ endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval)
+ waitTime := endTime.Sub(now)
+ if waitTime > maxWait {
+ return 0, false
+ }
+ tb.avail = avail
+ return waitTime, true
+}
+
+// adjust updates the number of available tokens based on the current time.
+// It returns the current tick.
+func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
+ currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval)
+
+ if tb.avail >= tb.capacity {
+ return
+ }
+ tb.avail += (currentTick - tb.availTick) * tb.quantum
+ if tb.avail > tb.capacity {
+ tb.avail = tb.capacity
+ }
+ tb.availTick = currentTick
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/juju/ratelimit/reader.go b/src/kube2msb/vendor/github.com/juju/ratelimit/reader.go
new file mode 100644
index 0000000..6403bf7
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/juju/ratelimit/reader.go
@@ -0,0 +1,51 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3 with static-linking exception.
+// See LICENCE file for details.
+
+package ratelimit
+
+import "io"
+
+type reader struct {
+ r io.Reader
+ bucket *Bucket
+}
+
+// Reader returns a reader that is rate limited by
+// the given token bucket. Each token in the bucket
+// represents one byte.
+func Reader(r io.Reader, bucket *Bucket) io.Reader {
+ return &reader{
+ r: r,
+ bucket: bucket,
+ }
+}
+
+func (r *reader) Read(buf []byte) (int, error) {
+ n, err := r.r.Read(buf)
+ if n <= 0 {
+ return n, err
+ }
+ r.bucket.Wait(int64(n))
+ return n, err
+}
+
+type writer struct {
+ w io.Writer
+ bucket *Bucket
+}
+
+// Writer returns a writer that is rate limited by
+// the given token bucket. Each token in the bucket
+// represents one byte.
+func Writer(w io.Writer, bucket *Bucket) io.Writer {
+ return &writer{
+ w: w,
+ bucket: bucket,
+ }
+}
+
+func (w *writer) Write(buf []byte) (int, error) {
+ w.bucket.Wait(int64(len(buf)))
+ return w.w.Write(buf)
+}
diff --git a/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
new file mode 100644
index 0000000..13f15df
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2013 Matt T. Proud
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
new file mode 100644
index 0000000..66d9b54
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+var errInvalidVarint = errors.New("invalid varint32 encountered")
+
+// ReadDelimited decodes a message from the provided length-delimited stream,
+// where the length is encoded as a 32-bit varint prefix to the message body.
+// It returns the total number of bytes read and any applicable error. This is
+// roughly equivalent to the companion Java API's
+// MessageLite#parseDelimitedFrom. As per the reader contract, this function
+// calls r.Read repeatedly as required until exactly one message including its
+// prefix is read and decoded (or an error has occurred). The function never
+// reads more bytes from the stream than required. The function never returns
+// an error if a message has been read and decoded correctly, even if the end
+// of the stream has been reached in doing so. In that case, any subsequent
+// calls return (0, io.EOF).
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
+ // Per AbstractParser#parsePartialDelimitedFrom with
+ // CodedInputStream#readRawVarint32.
+ headerBuf := make([]byte, binary.MaxVarintLen32)
+ var bytesRead, varIntBytes int
+ var messageLength uint64
+ for varIntBytes == 0 { // i.e. no varint has been decoded yet.
+ if bytesRead >= len(headerBuf) {
+ return bytesRead, errInvalidVarint
+ }
+ // We have to read byte by byte here to avoid reading more bytes
+ // than required. Each read byte is appended to what we have
+ // read before.
+ newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
+ if newBytesRead == 0 {
+ if err != nil {
+ return bytesRead, err
+ }
+ // A Reader should not return (0, nil), but if it does,
+ // it should be treated as no-op (according to the
+ // Reader contract). So let's go on...
+ continue
+ }
+ bytesRead += newBytesRead
+ // Now present everything read so far to the varint decoder and
+ // see if a varint can be decoded already.
+ messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+ }
+
+ messageBuf := make([]byte, messageLength)
+ newBytesRead, err := io.ReadFull(r, messageBuf)
+ bytesRead += newBytesRead
+ if err != nil {
+ return bytesRead, err
+ }
+
+ return bytesRead, proto.Unmarshal(messageBuf, m)
+}
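+
+// Illustrative usage (editor's sketch, not part of the upstream file): reading
+// consecutive length-delimited records until the stream is exhausted. The
+// concrete message type pb.Metric and the reader r are assumptions here.
+//
+//	func readAll(r io.Reader) ([]*pb.Metric, error) {
+//		var out []*pb.Metric
+//		for {
+//			m := &pb.Metric{}
+//			if _, err := pbutil.ReadDelimited(r, m); err != nil {
+//				if err == io.EOF {
+//					return out, nil // clean end of stream
+//				}
+//				return out, err
+//			}
+//			out = append(out, m)
+//		}
+//	}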
diff --git a/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 0000000..c318385
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
diff --git a/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 0000000..4b76ea9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain together
+// encoded messages of the same type in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
+ buffer, err := proto.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ buf := make([]byte, binary.MaxVarintLen32)
+ encodedLength := binary.PutUvarint(buf, uint64(len(buffer)))
+
+ sync, err := w.Write(buf[:encodedLength])
+ if err != nil {
+ return sync, err
+ }
+
+ n, err = w.Write(buffer)
+ return n + sync, err
+}
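+
+// Illustrative round trip (editor's sketch, not part of the upstream file):
+// WriteDelimited and ReadDelimited are symmetric, so a record written to a
+// buffer can be read straight back. The message values in/out and the type
+// pb.Metric are assumptions here.
+//
+//	var buf bytes.Buffer
+//	if _, err := pbutil.WriteDelimited(&buf, in); err != nil {
+//		return err
+//	}
+//	out := &pb.Metric{} // same concrete type as in
+//	if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
+//		return err
+//	}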
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/LICENSE b/src/kube2msb/vendor/github.com/opencontainers/runc/LICENSE
new file mode 100644
index 0000000..2744858
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/NOTICE b/src/kube2msb/vendor/github.com/opencontainers/runc/NOTICE
new file mode 100644
index 0000000..5c97abc
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/NOTICE
@@ -0,0 +1,17 @@
+runc
+
+Copyright 2012-2015 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (http://www.docker.com).
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see http://www.bis.doc.gov
+
+See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
new file mode 100644
index 0000000..274ab47
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
@@ -0,0 +1,64 @@
+// +build linux
+
+package cgroups
+
+import (
+ "fmt"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type Manager interface {
+ // Applies cgroup configuration to the process with the specified pid
+ Apply(pid int) error
+
+ // Returns the PIDs inside the cgroup set
+ GetPids() ([]int, error)
+
+ // Returns the PIDs inside the cgroup set & all sub-cgroups
+ GetAllPids() ([]int, error)
+
+ // Returns statistics for the cgroup set
+ GetStats() (*Stats, error)
+
+ // Toggles the freezer cgroup according to the specified state
+ Freeze(state configs.FreezerState) error
+
+ // Destroys the cgroup set
+ Destroy() error
+
+ // NewCgroupManager() and LoadCgroupManager() require the following attributes:
+ // Paths map[string]string
+ // Cgroups *cgroups.Cgroup
+ // Paths maps each cgroup subsystem to the path at which it is mounted.
+ // Cgroups specifies the cgroup settings for the various subsystems.
+
+ // Returns cgroup paths to save in a state file and to be able to
+ // restore the object later.
+ GetPaths() map[string]string
+
+ // Set the cgroup as configured.
+ Set(container *configs.Config) error
+}
+
+type NotFoundError struct {
+ Subsystem string
+}
+
+func (e *NotFoundError) Error() string {
+ return fmt.Sprintf("mountpoint for %s not found", e.Subsystem)
+}
+
+func NewNotFoundError(sub string) error {
+ return &NotFoundError{
+ Subsystem: sub,
+ }
+}
+
+func IsNotFound(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, ok := err.(*NotFoundError)
+ return ok
+}
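+
+// Illustrative caller-side use of the helpers above (editor's sketch, not part
+// of the upstream file): a missing, non-critical subsystem is usually treated
+// as ignorable. mgr is an assumed value implementing Manager.
+//
+//	if err := mgr.Apply(pid); err != nil {
+//		if cgroups.IsNotFound(err) {
+//			// subsystem not mounted on this host; carry on without it
+//		} else {
+//			return err
+//		}
+//	}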
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go
new file mode 100644
index 0000000..278d507
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package cgroups
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go
new file mode 100644
index 0000000..633ab04
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go
@@ -0,0 +1,402 @@
+// +build linux
+
+package fs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "sync"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
+)
+
+var (
+ subsystems = subsystemSet{
+ &CpusetGroup{},
+ &DevicesGroup{},
+ &MemoryGroup{},
+ &CpuGroup{},
+ &CpuacctGroup{},
+ &PidsGroup{},
+ &BlkioGroup{},
+ &HugetlbGroup{},
+ &NetClsGroup{},
+ &NetPrioGroup{},
+ &PerfEventGroup{},
+ &FreezerGroup{},
+ &NameGroup{GroupName: "name=systemd", Join: true},
+ }
+ CgroupProcesses = "cgroup.procs"
+ HugePageSizes, _ = cgroups.GetHugePageSize()
+)
+
+var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist")
+
+type subsystemSet []subsystem
+
+func (s subsystemSet) Get(name string) (subsystem, error) {
+ for _, ss := range s {
+ if ss.Name() == name {
+ return ss, nil
+ }
+ }
+ return nil, errSubsystemDoesNotExist
+}
+
+type subsystem interface {
+ // Name returns the name of the subsystem.
+ Name() string
+ // Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
+ GetStats(path string, stats *cgroups.Stats) error
+ // Removes the cgroup represented by 'cgroupData'.
+ Remove(*cgroupData) error
+ // Creates and joins the cgroup represented by 'cgroupData'.
+ Apply(*cgroupData) error
+ // Set the cgroup represented by cgroup.
+ Set(path string, cgroup *configs.Cgroup) error
+}
+
+type Manager struct {
+ mu sync.Mutex
+ Cgroups *configs.Cgroup
+ Paths map[string]string
+}
+
+// The absolute path to the root of the cgroup hierarchies.
+var cgroupRootLock sync.Mutex
+var cgroupRoot string
+
+// Gets the cgroupRoot.
+func getCgroupRoot() (string, error) {
+ cgroupRootLock.Lock()
+ defer cgroupRootLock.Unlock()
+
+ if cgroupRoot != "" {
+ return cgroupRoot, nil
+ }
+
+ root, err := cgroups.FindCgroupMountpointDir()
+ if err != nil {
+ return "", err
+ }
+
+ if _, err := os.Stat(root); err != nil {
+ return "", err
+ }
+
+ cgroupRoot = root
+ return cgroupRoot, nil
+}
+
+type cgroupData struct {
+ root string
+ innerPath string
+ config *configs.Cgroup
+ pid int
+}
+
+func (m *Manager) Apply(pid int) (err error) {
+ if m.Cgroups == nil {
+ return nil
+ }
+
+ var c = m.Cgroups
+
+ d, err := getCgroupData(m.Cgroups, pid)
+ if err != nil {
+ return err
+ }
+
+ if c.Paths != nil {
+ paths := make(map[string]string)
+ for name, path := range c.Paths {
+ _, err := d.path(name)
+ if err != nil {
+ if cgroups.IsNotFound(err) {
+ continue
+ }
+ return err
+ }
+ paths[name] = path
+ }
+ m.Paths = paths
+ return cgroups.EnterPid(m.Paths, pid)
+ }
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ paths := make(map[string]string)
+ for _, sys := range subsystems {
+ if err := sys.Apply(d); err != nil {
+ return err
+ }
+ // TODO: Apply should, ideally, be reentrant or be broken up into a separate
+ // create and join phase so that the cgroup hierarchy for a container can be
+ // created first; join then consists of writing the process pids to cgroup.procs.
+ p, err := d.path(sys.Name())
+ if err != nil {
+ // The non-presence of the devices subsystem is
+ // considered fatal for security reasons.
+ if cgroups.IsNotFound(err) && sys.Name() != "devices" {
+ continue
+ }
+ return err
+ }
+ paths[sys.Name()] = p
+ }
+ m.Paths = paths
+ return nil
+}
+
+func (m *Manager) Destroy() error {
+ if m.Cgroups.Paths != nil {
+ return nil
+ }
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if err := cgroups.RemovePaths(m.Paths); err != nil {
+ return err
+ }
+ m.Paths = make(map[string]string)
+ return nil
+}
+
+func (m *Manager) GetPaths() map[string]string {
+ m.mu.Lock()
+ paths := m.Paths
+ m.mu.Unlock()
+ return paths
+}
+
+func (m *Manager) GetStats() (*cgroups.Stats, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ stats := cgroups.NewStats()
+ for name, path := range m.Paths {
+ sys, err := subsystems.Get(name)
+ if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) {
+ continue
+ }
+ if err := sys.GetStats(path, stats); err != nil {
+ return nil, err
+ }
+ }
+ return stats, nil
+}
+
+func (m *Manager) Set(container *configs.Config) error {
+ for _, sys := range subsystems {
+ // Generate fake cgroup data.
+ d, err := getCgroupData(container.Cgroups, -1)
+ if err != nil {
+ return err
+ }
+ // Get the path, but don't error out if the cgroup wasn't found.
+ path, err := d.path(sys.Name())
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+
+ if err := sys.Set(path, container.Cgroups); err != nil {
+ return err
+ }
+ }
+
+ if m.Paths["cpu"] != "" {
+ if err := CheckCpushares(m.Paths["cpu"], container.Cgroups.Resources.CpuShares); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Freeze toggles the container's freezer cgroup depending on the state
+// provided
+func (m *Manager) Freeze(state configs.FreezerState) error {
+ d, err := getCgroupData(m.Cgroups, 0)
+ if err != nil {
+ return err
+ }
+ dir, err := d.path("freezer")
+ if err != nil {
+ return err
+ }
+ prevState := m.Cgroups.Resources.Freezer
+ m.Cgroups.Resources.Freezer = state
+ freezer, err := subsystems.Get("freezer")
+ if err != nil {
+ return err
+ }
+ err = freezer.Set(dir, m.Cgroups)
+ if err != nil {
+ m.Cgroups.Resources.Freezer = prevState
+ return err
+ }
+ return nil
+}
+
+func (m *Manager) GetPids() ([]int, error) {
+ dir, err := getCgroupPath(m.Cgroups)
+ if err != nil {
+ return nil, err
+ }
+ return cgroups.GetPids(dir)
+}
+
+func (m *Manager) GetAllPids() ([]int, error) {
+ dir, err := getCgroupPath(m.Cgroups)
+ if err != nil {
+ return nil, err
+ }
+ return cgroups.GetAllPids(dir)
+}
+
+func getCgroupPath(c *configs.Cgroup) (string, error) {
+ d, err := getCgroupData(c, 0)
+ if err != nil {
+ return "", err
+ }
+
+ return d.path("devices")
+}
+
+func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) {
+ root, err := getCgroupRoot()
+ if err != nil {
+ return nil, err
+ }
+
+ if (c.Name != "" || c.Parent != "") && c.Path != "" {
+ return nil, fmt.Errorf("cgroup: either Path or Name and Parent should be used")
+ }
+
+ // XXX: Do not remove this code. Path safety is important! -- cyphar
+ cgPath := libcontainerUtils.CleanPath(c.Path)
+ cgParent := libcontainerUtils.CleanPath(c.Parent)
+ cgName := libcontainerUtils.CleanPath(c.Name)
+
+ innerPath := cgPath
+ if innerPath == "" {
+ innerPath = filepath.Join(cgParent, cgName)
+ }
+
+ return &cgroupData{
+ root: root,
+ innerPath: innerPath,
+ config: c,
+ pid: pid,
+ }, nil
+}
+
+func (raw *cgroupData) parentPath(subsystem, mountpoint, root string) (string, error) {
+ // Use GetThisCgroupDir instead of GetInitCgroupDir, because the creating
+ // process could be in a container that shares the pid namespace with the host,
+ // and /proc/1/cgroup could point to a whole other world of cgroups.
+ initPath, err := cgroups.GetThisCgroupDir(subsystem)
+ if err != nil {
+ return "", err
+ }
+ // This is needed for nested containers, because in /proc/self/cgroup we
+ // see paths from the host, which don't exist in the container.
+ relDir, err := filepath.Rel(root, initPath)
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(mountpoint, relDir), nil
+}
+
+func (raw *cgroupData) path(subsystem string) (string, error) {
+ mnt, root, err := cgroups.FindCgroupMountpointAndRoot(subsystem)
+ // If the subsystem is not mounted, there is no point in building the path.
+ if err != nil {
+ return "", err
+ }
+
+ // If the cgroup name/path is absolute do not look relative to the cgroup of the init process.
+ if filepath.IsAbs(raw.innerPath) {
+ // Sometimes subsystems can be mounted together as 'cpu,cpuacct'.
+ return filepath.Join(raw.root, filepath.Base(mnt), raw.innerPath), nil
+ }
+
+ parentPath, err := raw.parentPath(subsystem, mnt, root)
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(parentPath, raw.innerPath), nil
+}
+
+func (raw *cgroupData) join(subsystem string) (string, error) {
+ path, err := raw.path(subsystem)
+ if err != nil {
+ return "", err
+ }
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return "", err
+ }
+ if err := writeFile(path, CgroupProcesses, strconv.Itoa(raw.pid)); err != nil {
+ return "", err
+ }
+ return path, nil
+}
+
+func writeFile(dir, file, data string) error {
+ // Normally dir should not be empty; one case where it is empty is when the
+ // cgroup subsystem is not mounted, and we want the write to fail here.
+ if dir == "" {
+ return fmt.Errorf("no such directory for %s", file)
+ }
+ if err := ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700); err != nil {
+ return fmt.Errorf("failed to write %v to %v: %v", data, file, err)
+ }
+ return nil
+}
+
+func readFile(dir, file string) (string, error) {
+ data, err := ioutil.ReadFile(filepath.Join(dir, file))
+ return string(data), err
+}
+
+func removePath(p string, err error) error {
+ if err != nil {
+ return err
+ }
+ if p != "" {
+ return os.RemoveAll(p)
+ }
+ return nil
+}
+
+func CheckCpushares(path string, c int64) error {
+ var cpuShares int64
+
+ if c == 0 {
+ return nil
+ }
+
+ fd, err := os.Open(filepath.Join(path, "cpu.shares"))
+ if err != nil {
+ return err
+ }
+ defer fd.Close()
+
+ _, err = fmt.Fscanf(fd, "%d", &cpuShares)
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ if c > cpuShares {
+ return fmt.Errorf("The maximum allowed cpu-shares is %d", cpuShares)
+ } else if c < cpuShares {
+ return fmt.Errorf("The minimum allowed cpu-shares is %d", cpuShares)
+ }
+
+ return nil
+}
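+
+// Illustrative lifecycle of the fs Manager defined above (editor's sketch, not
+// part of the upstream file): Apply joins the per-subsystem cgroups for pid,
+// Set writes the configured limits, GetStats reads them back and Destroy
+// removes the paths. The config value and pid are assumptions here.
+//
+//	m := &fs.Manager{Cgroups: config.Cgroups}
+//	if err := m.Apply(pid); err != nil {
+//		return err
+//	}
+//	defer m.Destroy()
+//	if err := m.Set(config); err != nil {
+//		return err
+//	}
+//	stats, err := m.GetStats()
+//	if err != nil {
+//		return err
+//	}
+//	_ = stats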
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go
new file mode 100644
index 0000000..a142cb9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go
@@ -0,0 +1,237 @@
+// +build linux
+
+package fs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type BlkioGroup struct {
+}
+
+func (s *BlkioGroup) Name() string {
+ return "blkio"
+}
+
+func (s *BlkioGroup) Apply(d *cgroupData) error {
+ _, err := d.join("blkio")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *BlkioGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.BlkioWeight != 0 {
+ if err := writeFile(path, "blkio.weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioWeight), 10)); err != nil {
+ return err
+ }
+ }
+
+ if cgroup.Resources.BlkioLeafWeight != 0 {
+ if err := writeFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioLeafWeight), 10)); err != nil {
+ return err
+ }
+ }
+ for _, wd := range cgroup.Resources.BlkioWeightDevice {
+ if err := writeFile(path, "blkio.weight_device", wd.WeightString()); err != nil {
+ return err
+ }
+ if err := writeFile(path, "blkio.leaf_weight_device", wd.LeafWeightString()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleReadBpsDevice {
+ if err := writeFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleWriteBpsDevice {
+ if err := writeFile(path, "blkio.throttle.write_bps_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleReadIOPSDevice {
+ if err := writeFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleWriteIOPSDevice {
+ if err := writeFile(path, "blkio.throttle.write_iops_device", td.String()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *BlkioGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("blkio"))
+}
+
+/*
+examples:
+
+ blkio.sectors
+ 8:0 6792
+
+ blkio.io_service_bytes
+ 8:0 Read 1282048
+ 8:0 Write 2195456
+ 8:0 Sync 2195456
+ 8:0 Async 1282048
+ 8:0 Total 3477504
+ Total 3477504
+
+ blkio.io_serviced
+ 8:0 Read 124
+ 8:0 Write 104
+ 8:0 Sync 104
+ 8:0 Async 124
+ 8:0 Total 228
+ Total 228
+
+ blkio.io_queued
+ 8:0 Read 0
+ 8:0 Write 0
+ 8:0 Sync 0
+ 8:0 Async 0
+ 8:0 Total 0
+ Total 0
+*/
+
+func splitBlkioStatLine(r rune) bool {
+ return r == ' ' || r == ':'
+}
+
+func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) {
+ var blkioStats []cgroups.BlkioStatEntry
+ f, err := os.Open(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return blkioStats, nil
+ }
+ return nil, err
+ }
+ defer f.Close()
+
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ // format: dev type amount
+ fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine)
+ if len(fields) < 3 {
+ if len(fields) == 2 && fields[0] == "Total" {
+ // skip total line
+ continue
+ } else {
+ return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
+ }
+ }
+
+ v, err := strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ major := v
+
+ v, err = strconv.ParseUint(fields[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ minor := v
+
+ op := ""
+ valueField := 2
+ if len(fields) == 4 {
+ op = fields[2]
+ valueField = 3
+ }
+ v, err = strconv.ParseUint(fields[valueField], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v})
+ }
+
+ return blkioStats, nil
+}
+
+func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error {
+ // Try to read CFQ stats available on all CFQ enabled kernels first
+ if blkioStats, err := getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err == nil && blkioStats != nil {
+ return getCFQStats(path, stats)
+ }
+ return getStats(path, stats) // Use generic stats as fallback
+}
+
+func getCFQStats(path string, stats *cgroups.Stats) error {
+ var blkioStats []cgroups.BlkioStatEntry
+ var err error
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.SectorsRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServiceBytesRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServicedRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoQueuedRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_time_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServiceTimeRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_wait_time_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoWaitTimeRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_merged_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoMergedRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.time_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoTimeRecursive = blkioStats
+
+ return nil
+}
+
+func getStats(path string, stats *cgroups.Stats) error {
+ var blkioStats []cgroups.BlkioStatEntry
+ var err error
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_service_bytes")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServiceBytesRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_serviced")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServicedRecursive = blkioStats
+
+ return nil
+}
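+
+// Illustrative parse (editor's sketch, not part of the upstream file): a line
+// such as "8:0 Read 1282048" from blkio.io_service_bytes_recursive is split on
+// ':' and ' ' by splitBlkioStatLine into ["8" "0" "Read" "1282048"] and ends up
+// as
+//
+//	cgroups.BlkioStatEntry{Major: 8, Minor: 0, Op: "Read", Value: 1282048}
+//
+// Three-field lines without an operation (e.g. blkio.sectors) fall back to
+// valueField 2, and the two-field "Total <n>" lines are skipped.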
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go
new file mode 100644
index 0000000..a4ef28a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go
@@ -0,0 +1,94 @@
+// +build linux
+
+package fs
+
+import (
+ "bufio"
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type CpuGroup struct {
+}
+
+func (s *CpuGroup) Name() string {
+ return "cpu"
+}
+
+func (s *CpuGroup) Apply(d *cgroupData) error {
+ // We always want to join the cpu group, to allow fair cpu scheduling
+ // on a container basis
+ _, err := d.join("cpu")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *CpuGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.CpuShares != 0 {
+ if err := writeFile(path, "cpu.shares", strconv.FormatInt(cgroup.Resources.CpuShares, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuPeriod != 0 {
+ if err := writeFile(path, "cpu.cfs_period_us", strconv.FormatInt(cgroup.Resources.CpuPeriod, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuQuota != 0 {
+ if err := writeFile(path, "cpu.cfs_quota_us", strconv.FormatInt(cgroup.Resources.CpuQuota, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuRtPeriod != 0 {
+ if err := writeFile(path, "cpu.rt_period_us", strconv.FormatInt(cgroup.Resources.CpuRtPeriod, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuRtRuntime != 0 {
+ if err := writeFile(path, "cpu.rt_runtime_us", strconv.FormatInt(cgroup.Resources.CpuRtRuntime, 10)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *CpuGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("cpu"))
+}
+
+func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error {
+ f, err := os.Open(filepath.Join(path, "cpu.stat"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ defer f.Close()
+
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ t, v, err := getCgroupParamKeyValue(sc.Text())
+ if err != nil {
+ return err
+ }
+ switch t {
+ case "nr_periods":
+ stats.CpuStats.ThrottlingData.Periods = v
+
+ case "nr_throttled":
+ stats.CpuStats.ThrottlingData.ThrottledPeriods = v
+
+ case "throttled_time":
+ stats.CpuStats.ThrottlingData.ThrottledTime = v
+ }
+ }
+ return nil
+}
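+
+// Illustrative cpu.stat content parsed above (editor's sketch, not part of the
+// upstream file):
+//
+//	nr_periods 200
+//	nr_throttled 5
+//	throttled_time 120000000
+//
+// which would populate Periods=200, ThrottledPeriods=5 and ThrottledTime (in
+// nanoseconds) in stats.CpuStats.ThrottlingData.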
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go
new file mode 100644
index 0000000..53afbad
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go
@@ -0,0 +1,121 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+const (
+ cgroupCpuacctStat = "cpuacct.stat"
+ nanosecondsInSecond = 1000000000
+)
+
+var clockTicks = uint64(system.GetClockTicks())
+
+type CpuacctGroup struct {
+}
+
+func (s *CpuacctGroup) Name() string {
+ return "cpuacct"
+}
+
+func (s *CpuacctGroup) Apply(d *cgroupData) error {
+ // we just want to join this group even though we don't set anything
+ if _, err := d.join("cpuacct"); err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+
+ return nil
+}
+
+func (s *CpuacctGroup) Set(path string, cgroup *configs.Cgroup) error {
+ return nil
+}
+
+func (s *CpuacctGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("cpuacct"))
+}
+
+func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error {
+ userModeUsage, kernelModeUsage, err := getCpuUsageBreakdown(path)
+ if err != nil {
+ return err
+ }
+
+ totalUsage, err := getCgroupParamUint(path, "cpuacct.usage")
+ if err != nil {
+ return err
+ }
+
+ percpuUsage, err := getPercpuUsage(path)
+ if err != nil {
+ return err
+ }
+
+ stats.CpuStats.CpuUsage.TotalUsage = totalUsage
+ stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage
+ stats.CpuStats.CpuUsage.UsageInUsermode = userModeUsage
+ stats.CpuStats.CpuUsage.UsageInKernelmode = kernelModeUsage
+ return nil
+}
+
+// Returns user and kernel usage breakdown in nanoseconds.
+func getCpuUsageBreakdown(path string) (uint64, uint64, error) {
+ userModeUsage := uint64(0)
+ kernelModeUsage := uint64(0)
+ const (
+ userField = "user"
+ systemField = "system"
+ )
+
+ // Expected format:
+ // user <usage in ticks>
+ // system <usage in ticks>
+ data, err := ioutil.ReadFile(filepath.Join(path, cgroupCpuacctStat))
+ if err != nil {
+ return 0, 0, err
+ }
+ fields := strings.Fields(string(data))
+ if len(fields) != 4 {
+ return 0, 0, fmt.Errorf("failure - %s is expected to have 4 fields", filepath.Join(path, cgroupCpuacctStat))
+ }
+ if fields[0] != userField {
+ return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[0], cgroupCpuacctStat, userField)
+ }
+ if fields[2] != systemField {
+ return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[2], cgroupCpuacctStat, systemField)
+ }
+ if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil {
+ return 0, 0, err
+ }
+ if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil {
+ return 0, 0, err
+ }
+
+ return (userModeUsage * nanosecondsInSecond) / clockTicks, (kernelModeUsage * nanosecondsInSecond) / clockTicks, nil
+}
+
+func getPercpuUsage(path string) ([]uint64, error) {
+ percpuUsage := []uint64{}
+ data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu"))
+ if err != nil {
+ return percpuUsage, err
+ }
+ for _, value := range strings.Fields(string(data)) {
+ value, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err)
+ }
+ percpuUsage = append(percpuUsage, value)
+ }
+ return percpuUsage, nil
+}
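+
+// Worked example for getCpuUsageBreakdown above (editor's sketch, not part of
+// the upstream file): assuming the common USER_HZ of 100 clock ticks per
+// second, a cpuacct.stat reading of
+//
+//	user 250
+//	system 100
+//
+// converts to (250 * 1e9) / 100 = 2.5e9 ns in user mode and
+// (100 * 1e9) / 100 = 1e9 ns in kernel mode.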
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go
new file mode 100644
index 0000000..cbe62bd
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go
@@ -0,0 +1,139 @@
+// +build linux
+
+package fs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
+)
+
+type CpusetGroup struct {
+}
+
+func (s *CpusetGroup) Name() string {
+ return "cpuset"
+}
+
+func (s *CpusetGroup) Apply(d *cgroupData) error {
+ dir, err := d.path("cpuset")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return s.ApplyDir(dir, d.config, d.pid)
+}
+
+func (s *CpusetGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.CpusetCpus != "" {
+ if err := writeFile(path, "cpuset.cpus", cgroup.Resources.CpusetCpus); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpusetMems != "" {
+ if err := writeFile(path, "cpuset.mems", cgroup.Resources.CpusetMems); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *CpusetGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("cpuset"))
+}
+
+func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
+
+func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) error {
+ // This might happen if we have no cpuset cgroup mounted.
+ // Just do nothing and don't fail.
+ if dir == "" {
+ return nil
+ }
+ root, err := getCgroupRoot()
+ if err != nil {
+ return err
+ }
+ if err := s.ensureParent(dir, root); err != nil {
+ return err
+ }
+ // Because we are not using d.join, we need to place the pid into the procs
+ // file ourselves, unlike the other subsystems.
+ if err := writeFile(dir, "cgroup.procs", strconv.Itoa(pid)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) {
+ if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil {
+ return
+ }
+ if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil {
+ return
+ }
+ return cpus, mems, nil
+}
+
+// ensureParent makes sure that the parent directory of current is created
+// and populated with the proper cpus and mems files copied from
+// its parent.
+func (s *CpusetGroup) ensureParent(current, root string) error {
+ parent := filepath.Dir(current)
+ if libcontainerUtils.CleanPath(parent) == root {
+ return nil
+ }
+ // Avoid infinite recursion.
+ if parent == current {
+ return fmt.Errorf("cpuset: cgroup parent path outside cgroup root")
+ }
+ if err := s.ensureParent(parent, root); err != nil {
+ return err
+ }
+ if err := os.MkdirAll(current, 0755); err != nil {
+ return err
+ }
+ return s.copyIfNeeded(current, parent)
+}
+
+// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
+// directory to the current directory if the current directory's files are empty.
+func (s *CpusetGroup) copyIfNeeded(current, parent string) error {
+ var (
+ err error
+ currentCpus, currentMems []byte
+ parentCpus, parentMems []byte
+ )
+
+ if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil {
+ return err
+ }
+ if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil {
+ return err
+ }
+
+ if s.isEmpty(currentCpus) {
+ if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil {
+ return err
+ }
+ }
+ if s.isEmpty(currentMems) {
+ if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *CpusetGroup) isEmpty(b []byte) bool {
+ return len(bytes.Trim(b, "\n")) == 0
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go
new file mode 100644
index 0000000..5f78331
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go
@@ -0,0 +1,78 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+type DevicesGroup struct {
+}
+
+func (s *DevicesGroup) Name() string {
+ return "devices"
+}
+
+func (s *DevicesGroup) Apply(d *cgroupData) error {
+ _, err := d.join("devices")
+ if err != nil {
+ // We return the error even if it is a `not found` error; the devices
+ // cgroup is a hard requirement for the container's security.
+ return err
+ }
+ return nil
+}
+
+func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if system.RunningInUserNS() {
+ return nil
+ }
+
+ devices := cgroup.Resources.Devices
+ if len(devices) > 0 {
+ for _, dev := range devices {
+ file := "devices.deny"
+ if dev.Allow {
+ file = "devices.allow"
+ }
+ if err := writeFile(path, file, dev.CgroupString()); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ if !cgroup.Resources.AllowAllDevices {
+ if err := writeFile(path, "devices.deny", "a"); err != nil {
+ return err
+ }
+
+ for _, dev := range cgroup.Resources.AllowedDevices {
+ if err := writeFile(path, "devices.allow", dev.CgroupString()); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ if err := writeFile(path, "devices.allow", "a"); err != nil {
+ return err
+ }
+
+ for _, dev := range cgroup.Resources.DeniedDevices {
+ if err := writeFile(path, "devices.deny", dev.CgroupString()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *DevicesGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("devices"))
+}
+
+func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go
new file mode 100644
index 0000000..e70dfe3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go
@@ -0,0 +1,61 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type FreezerGroup struct {
+}
+
+func (s *FreezerGroup) Name() string {
+ return "freezer"
+}
+
+func (s *FreezerGroup) Apply(d *cgroupData) error {
+ _, err := d.join("freezer")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error {
+ switch cgroup.Resources.Freezer {
+ case configs.Frozen, configs.Thawed:
+ if err := writeFile(path, "freezer.state", string(cgroup.Resources.Freezer)); err != nil {
+ return err
+ }
+
+ for {
+ state, err := readFile(path, "freezer.state")
+ if err != nil {
+ return err
+ }
+ if strings.TrimSpace(state) == string(cgroup.Resources.Freezer) {
+ break
+ }
+ time.Sleep(1 * time.Millisecond)
+ }
+ case configs.Undefined:
+ return nil
+ default:
+ return fmt.Errorf("Invalid argument '%s' to freezer.state", string(cgroup.Resources.Freezer))
+ }
+
+ return nil
+}
+
+func (s *FreezerGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("freezer"))
+}
+
+func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go
new file mode 100644
index 0000000..3ef9e03
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package fs
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go
new file mode 100644
index 0000000..2f97277
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go
@@ -0,0 +1,71 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type HugetlbGroup struct {
+}
+
+func (s *HugetlbGroup) Name() string {
+ return "hugetlb"
+}
+
+func (s *HugetlbGroup) Apply(d *cgroupData) error {
+ _, err := d.join("hugetlb")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *HugetlbGroup) Set(path string, cgroup *configs.Cgroup) error {
+ for _, hugetlb := range cgroup.Resources.HugetlbLimit {
+ if err := writeFile(path, strings.Join([]string{"hugetlb", hugetlb.Pagesize, "limit_in_bytes"}, "."), strconv.FormatUint(hugetlb.Limit, 10)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *HugetlbGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("hugetlb"))
+}
+
+func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error {
+ hugetlbStats := cgroups.HugetlbStats{}
+ for _, pageSize := range HugePageSizes {
+ usage := strings.Join([]string{"hugetlb", pageSize, "usage_in_bytes"}, ".")
+ value, err := getCgroupParamUint(path, usage)
+ if err != nil {
+ return fmt.Errorf("failed to parse %s - %v", usage, err)
+ }
+ hugetlbStats.Usage = value
+
+ maxUsage := strings.Join([]string{"hugetlb", pageSize, "max_usage_in_bytes"}, ".")
+ value, err = getCgroupParamUint(path, maxUsage)
+ if err != nil {
+ return fmt.Errorf("failed to parse %s - %v", maxUsage, err)
+ }
+ hugetlbStats.MaxUsage = value
+
+ failcnt := strings.Join([]string{"hugetlb", pageSize, "failcnt"}, ".")
+ value, err = getCgroupParamUint(path, failcnt)
+ if err != nil {
+ return fmt.Errorf("failed to parse %s - %v", failcnt, err)
+ }
+ hugetlbStats.Failcnt = value
+
+ stats.HugetlbStats[pageSize] = hugetlbStats
+ }
+
+ return nil
+}
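Both Set and GetStats above build the hugetlb file names by joining the prefix, the page size and the suffix with dots. A small sketch of that naming, assuming a page size string such as those produced by cgroups.GetHugePageSize:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// "2MB" is a hypothetical page size string in the format GetHugePageSize returns.
	pageSize := "2MB"

	limitFile := strings.Join([]string{"hugetlb", pageSize, "limit_in_bytes"}, ".")
	usageFile := strings.Join([]string{"hugetlb", pageSize, "usage_in_bytes"}, ".")

	fmt.Println(limitFile) // hugetlb.2MB.limit_in_bytes
	fmt.Println(usageFile) // hugetlb.2MB.usage_in_bytes
}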
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go
new file mode 100644
index 0000000..b837128
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go
@@ -0,0 +1,291 @@
+// +build linux
+
+package fs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type MemoryGroup struct {
+}
+
+func (s *MemoryGroup) Name() string {
+ return "memory"
+}
+
+func (s *MemoryGroup) Apply(d *cgroupData) (err error) {
+ path, err := d.path("memory")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ // reset error.
+ err = nil
+ if path == "" {
+ // Invalid input.
+ return fmt.Errorf("invalid path for memory cgroups: %+v", d)
+ }
+ defer func() {
+ if err != nil {
+ os.RemoveAll(path)
+ }
+ }()
+ if !cgroups.PathExists(path) {
+ if err = os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+ }
+ if memoryAssigned(d.config) {
+ // We have to set kernel memory here, as we can't change it once
+ // processes have been attached to the cgroup.
+ if err = s.SetKernelMemory(path, d.config); err != nil {
+ return err
+ }
+ }
+ // We need to join the memory cgroup after setting memory limits, because
+ // kmem.limit_in_bytes can only be set when the cgroup is empty.
+ if _, jerr := d.join("memory"); jerr != nil && !cgroups.IsNotFound(jerr) {
+ err = jerr
+ return err
+ }
+ return nil
+}
+
+func getModifyTime(path string) (time.Time, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return time.Time{}, fmt.Errorf("failed to get memory cgroups creation time: %v", err)
+ }
+ return stat.ModTime(), nil
+}
+
+func (s *MemoryGroup) SetKernelMemory(path string, cgroup *configs.Cgroup) error {
+ // This has to be done separately because it has special
+ // constraints (it can only be initialized before setting up a
+ // hierarchy or adding a task to the cgroups. However, if
+ // successfully initialized, it can be updated anytime afterwards)
+ if cgroup.Resources.KernelMemory != 0 {
+ // Is kmem.limit_in_bytes already set?
+ // memory.kmem.max_usage_in_bytes is a read-only file. Use it to get cgroups creation time.
+ kmemCreationTime, err := getModifyTime(filepath.Join(path, "memory.kmem.max_usage_in_bytes"))
+ if err != nil {
+ return err
+ }
+ kmemLimitsUpdateTime, err := getModifyTime(filepath.Join(path, "memory.kmem.limit_in_bytes"))
+ if err != nil {
+ return err
+ }
+ // kmem.limit_in_bytes has already been set if its update time is after that of creation time.
+ // We use the `!=` op instead of `>` because the update time loses precision compared to the creation time.
+ kmemInitialized := !kmemLimitsUpdateTime.Equal(kmemCreationTime)
+ if !kmemInitialized {
+ // If there are already tasks in the cgroup, we can't change the limit either
+ tasks, err := getCgroupParamString(path, "tasks")
+ if err != nil {
+ return err
+ }
+ if tasks != "" {
+ return fmt.Errorf("cannot set kmem.limit_in_bytes after task have joined this cgroup")
+ }
+ }
+ if err := writeFile(path, "memory.kmem.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemory, 10)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func setMemoryAndSwap(path string, cgroup *configs.Cgroup) error {
+ // When memory and swap memory are both set, we need to handle the cases
+ // for updating the container.
+ if cgroup.Resources.Memory != 0 && cgroup.Resources.MemorySwap > 0 {
+ memoryUsage, err := getMemoryData(path, "")
+ if err != nil {
+ return err
+ }
+
+ // When updating the memory limit, we should adapt the write sequence
+ // for memory and swap memory, so the write won't fail because the new
+ // value and the old value don't satisfy the kernel's validation.
+ if memoryUsage.Limit < uint64(cgroup.Resources.MemorySwap) {
+ if err := writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
+ return err
+ }
+ if err := writeFile(path, "memory.limit_in_bytes", strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
+ return err
+ }
+ } else {
+ if err := writeFile(path, "memory.limit_in_bytes", strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
+ return err
+ }
+ if err := writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
+ return err
+ }
+ }
+ } else {
+ if cgroup.Resources.Memory != 0 {
+ if err := writeFile(path, "memory.limit_in_bytes", strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.MemorySwap > 0 {
+ if err := writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if err := setMemoryAndSwap(path, cgroup); err != nil {
+ return err
+ }
+
+ if err := s.SetKernelMemory(path, cgroup); err != nil {
+ return err
+ }
+
+ if cgroup.Resources.MemoryReservation != 0 {
+ if err := writeFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemoryReservation, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.KernelMemoryTCP != 0 {
+ if err := writeFile(path, "memory.kmem.tcp.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemoryTCP, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.OomKillDisable {
+ if err := writeFile(path, "memory.oom_control", "1"); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.MemorySwappiness == nil || int64(*cgroup.Resources.MemorySwappiness) == -1 {
+ return nil
+ } else if int64(*cgroup.Resources.MemorySwappiness) >= 0 && int64(*cgroup.Resources.MemorySwappiness) <= 100 {
+ if err := writeFile(path, "memory.swappiness", strconv.FormatInt(*cgroup.Resources.MemorySwappiness, 10)); err != nil {
+ return err
+ }
+ } else {
+ return fmt.Errorf("invalid value:%d. valid memory swappiness range is 0-100", int64(*cgroup.Resources.MemorySwappiness))
+ }
+
+ return nil
+}
+
+func (s *MemoryGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("memory"))
+}
+
+func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
+ // Set stats from memory.stat.
+ statsFile, err := os.Open(filepath.Join(path, "memory.stat"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ defer statsFile.Close()
+
+ sc := bufio.NewScanner(statsFile)
+ for sc.Scan() {
+ t, v, err := getCgroupParamKeyValue(sc.Text())
+ if err != nil {
+ return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err)
+ }
+ stats.MemoryStats.Stats[t] = v
+ }
+ stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"]
+
+ memoryUsage, err := getMemoryData(path, "")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.Usage = memoryUsage
+ swapUsage, err := getMemoryData(path, "memsw")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.SwapUsage = swapUsage
+ kernelUsage, err := getMemoryData(path, "kmem")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.KernelUsage = kernelUsage
+ kernelTCPUsage, err := getMemoryData(path, "kmem.tcp")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.KernelTCPUsage = kernelTCPUsage
+
+ return nil
+}
+
+func memoryAssigned(cgroup *configs.Cgroup) bool {
+ return cgroup.Resources.Memory != 0 ||
+ cgroup.Resources.MemoryReservation != 0 ||
+ cgroup.Resources.MemorySwap > 0 ||
+ cgroup.Resources.KernelMemory > 0 ||
+ cgroup.Resources.KernelMemoryTCP > 0 ||
+ cgroup.Resources.OomKillDisable ||
+ (cgroup.Resources.MemorySwappiness != nil && *cgroup.Resources.MemorySwappiness != -1)
+}
+
+func getMemoryData(path, name string) (cgroups.MemoryData, error) {
+ memoryData := cgroups.MemoryData{}
+
+ moduleName := "memory"
+ if name != "" {
+ moduleName = strings.Join([]string{"memory", name}, ".")
+ }
+ usage := strings.Join([]string{moduleName, "usage_in_bytes"}, ".")
+ maxUsage := strings.Join([]string{moduleName, "max_usage_in_bytes"}, ".")
+ failcnt := strings.Join([]string{moduleName, "failcnt"}, ".")
+ limit := strings.Join([]string{moduleName, "limit_in_bytes"}, ".")
+
+ value, err := getCgroupParamUint(path, usage)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", usage, err)
+ }
+ memoryData.Usage = value
+ value, err = getCgroupParamUint(path, maxUsage)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", maxUsage, err)
+ }
+ memoryData.MaxUsage = value
+ value, err = getCgroupParamUint(path, failcnt)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", failcnt, err)
+ }
+ memoryData.Failcnt = value
+ value, err = getCgroupParamUint(path, limit)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", limit, err)
+ }
+ memoryData.Limit = value
+
+ return memoryData, nil
+}
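setMemoryAndSwap above orders the two writes so that memory.limit_in_bytes never exceeds memory.memsw.limit_in_bytes at any intermediate step. A reduced sketch of just that ordering decision, with the package's writeFile helper replaced by a stand-in:

package main

import "fmt"

// write stands in for the package's writeFile helper.
func write(file string, value int64) {
	fmt.Printf("write %s = %d\n", file, value)
}

// setLimits mirrors the ordering in setMemoryAndSwap: when the limits grow,
// swap (memsw) is written first; when they shrink, memory is written first,
// so memory <= memsw holds after every single write.
func setLimits(currentMemLimit uint64, memory, memorySwap int64) {
	if currentMemLimit < uint64(memorySwap) {
		write("memory.memsw.limit_in_bytes", memorySwap)
		write("memory.limit_in_bytes", memory)
	} else {
		write("memory.limit_in_bytes", memory)
		write("memory.memsw.limit_in_bytes", memorySwap)
	}
}

func main() {
	// Raising a container from a 256 MiB limit to 512 MiB memory / 1 GiB memory+swap.
	setLimits(256<<20, 512<<20, 1<<30)
}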
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go
new file mode 100644
index 0000000..d8cf1d8
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go
@@ -0,0 +1,40 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NameGroup struct {
+ GroupName string
+ Join bool
+}
+
+func (s *NameGroup) Name() string {
+ return s.GroupName
+}
+
+func (s *NameGroup) Apply(d *cgroupData) error {
+ if s.Join {
+ // ignore errors if the named cgroup does not exist
+ d.join(s.GroupName)
+ }
+ return nil
+}
+
+func (s *NameGroup) Set(path string, cgroup *configs.Cgroup) error {
+ return nil
+}
+
+func (s *NameGroup) Remove(d *cgroupData) error {
+ if s.Join {
+ removePath(d.path(s.GroupName))
+ }
+ return nil
+}
+
+func (s *NameGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go
new file mode 100644
index 0000000..8a4054b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go
@@ -0,0 +1,41 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NetClsGroup struct {
+}
+
+func (s *NetClsGroup) Name() string {
+ return "net_cls"
+}
+
+func (s *NetClsGroup) Apply(d *cgroupData) error {
+ _, err := d.join("net_cls")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *NetClsGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.NetClsClassid != "" {
+ if err := writeFile(path, "net_cls.classid", cgroup.Resources.NetClsClassid); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *NetClsGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("net_cls"))
+}
+
+func (s *NetClsGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go
new file mode 100644
index 0000000..d0ab2af
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go
@@ -0,0 +1,41 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NetPrioGroup struct {
+}
+
+func (s *NetPrioGroup) Name() string {
+ return "net_prio"
+}
+
+func (s *NetPrioGroup) Apply(d *cgroupData) error {
+ _, err := d.join("net_prio")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *NetPrioGroup) Set(path string, cgroup *configs.Cgroup) error {
+ for _, prioMap := range cgroup.Resources.NetPrioIfpriomap {
+ if err := writeFile(path, "net_prio.ifpriomap", prioMap.CgroupString()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *NetPrioGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("net_prio"))
+}
+
+func (s *NetPrioGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go
new file mode 100644
index 0000000..5693676
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go
@@ -0,0 +1,35 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type PerfEventGroup struct {
+}
+
+func (s *PerfEventGroup) Name() string {
+ return "perf_event"
+}
+
+func (s *PerfEventGroup) Apply(d *cgroupData) error {
+ // we just want to join this group even though we don't set anything
+ if _, err := d.join("perf_event"); err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *PerfEventGroup) Set(path string, cgroup *configs.Cgroup) error {
+ return nil
+}
+
+func (s *PerfEventGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("perf_event"))
+}
+
+func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go
new file mode 100644
index 0000000..f1e3720
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go
@@ -0,0 +1,73 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "path/filepath"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type PidsGroup struct {
+}
+
+func (s *PidsGroup) Name() string {
+ return "pids"
+}
+
+func (s *PidsGroup) Apply(d *cgroupData) error {
+ _, err := d.join("pids")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *PidsGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.PidsLimit != 0 {
+ // "max" is the fallback value.
+ limit := "max"
+
+ if cgroup.Resources.PidsLimit > 0 {
+ limit = strconv.FormatInt(cgroup.Resources.PidsLimit, 10)
+ }
+
+ if err := writeFile(path, "pids.max", limit); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *PidsGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("pids"))
+}
+
+func (s *PidsGroup) GetStats(path string, stats *cgroups.Stats) error {
+ current, err := getCgroupParamUint(path, "pids.current")
+ if err != nil {
+ return fmt.Errorf("failed to parse pids.current - %s", err)
+ }
+
+ maxString, err := getCgroupParamString(path, "pids.max")
+ if err != nil {
+ return fmt.Errorf("failed to parse pids.max - %s", err)
+ }
+
+ // Default if pids.max == "max" is 0 -- which represents "no limit".
+ var max uint64
+ if maxString != "max" {
+ max, err = parseUint(maxString, 10, 64)
+ if err != nil {
+ return fmt.Errorf("failed to parse pids.max - unable to parse %q as a uint from Cgroup file %q", maxString, filepath.Join(path, "pids.max"))
+ }
+ }
+
+ stats.PidsStats.Current = current
+ stats.PidsStats.Limit = max
+ return nil
+}
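pids.max accepts either a number or the literal string "max": Set writes "max" for a negative PidsLimit (unlimited) and GetStats maps "max" back to a limit of 0. A small sketch of that round-trip, mirroring the two conversions above:

package main

import (
	"fmt"
	"strconv"
)

// limitToFile mirrors PidsGroup.Set: a non-positive limit means unlimited ("max").
func limitToFile(limit int64) string {
	if limit > 0 {
		return strconv.FormatInt(limit, 10)
	}
	return "max"
}

// fileToStat mirrors PidsGroup.GetStats: "max" maps to 0, i.e. no limit.
func fileToStat(contents string) (uint64, error) {
	if contents == "max" {
		return 0, nil
	}
	return strconv.ParseUint(contents, 10, 64)
}

func main() {
	for _, limit := range []int64{-1, 1024} {
		file := limitToFile(limit)
		stat, _ := fileToStat(file)
		fmt.Printf("PidsLimit=%d -> pids.max=%q -> stats.Limit=%d\n", limit, file, stat)
	}
}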
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go
new file mode 100644
index 0000000..5ff0a16
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go
@@ -0,0 +1,78 @@
+// +build linux
+
+package fs
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+var (
+ ErrNotValidFormat = errors.New("line is not a valid key value format")
+)
+
+// Saturates negative values at zero and returns a uint64.
+// Due to kernel bugs, some of the memory cgroup stats can be negative.
+func parseUint(s string, base, bitSize int) (uint64, error) {
+ value, err := strconv.ParseUint(s, base, bitSize)
+ if err != nil {
+ intValue, intErr := strconv.ParseInt(s, base, bitSize)
+ // 1. Handle negative values greater than MinInt64, and
+ // 2. Handle negative values less than MinInt64.
+ if intErr == nil && intValue < 0 {
+ return 0, nil
+ } else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 {
+ return 0, nil
+ }
+
+ return value, err
+ }
+
+ return value, nil
+}
+
+ // Parses a cgroup param and returns its name and value,
+ // e.g. "io_service_bytes 1234" returns ("io_service_bytes", 1234).
+func getCgroupParamKeyValue(t string) (string, uint64, error) {
+ parts := strings.Fields(t)
+ switch len(parts) {
+ case 2:
+ value, err := parseUint(parts[1], 10, 64)
+ if err != nil {
+ return "", 0, fmt.Errorf("unable to convert param value (%q) to uint64: %v", parts[1], err)
+ }
+
+ return parts[0], value, nil
+ default:
+ return "", 0, ErrNotValidFormat
+ }
+}
+
+// Gets a single uint64 value from the specified cgroup file.
+func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) {
+ fileName := filepath.Join(cgroupPath, cgroupFile)
+ contents, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return 0, err
+ }
+
+ res, err := parseUint(strings.TrimSpace(string(contents)), 10, 64)
+ if err != nil {
+ return res, fmt.Errorf("unable to parse %q as a uint from Cgroup file %q", string(contents), fileName)
+ }
+ return res, nil
+}
+
+// Gets a string value from the specified cgroup file
+func getCgroupParamString(cgroupPath, cgroupFile string) (string, error) {
+ contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile))
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(string(contents)), nil
+}
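getCgroupParamKeyValue splits a "name value" line from files such as memory.stat, and parseUint clamps negative values (which can appear due to kernel bugs) to zero. A short, self-contained sketch of parsing one such line; the sample line is made up:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseKeyValue mirrors getCgroupParamKeyValue for a well-formed line.
func parseKeyValue(line string) (string, uint64, error) {
	parts := strings.Fields(line)
	if len(parts) != 2 {
		return "", 0, fmt.Errorf("line is not a valid key value format")
	}
	// The vendored parseUint would additionally clamp negative values to 0 here.
	value, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		return "", 0, err
	}
	return parts[0], value, nil
}

func main() {
	key, value, err := parseKeyValue("cache 123456789")
	if err != nil {
		panic(err)
	}
	fmt.Println(key, value) // cache 123456789
}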
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go
new file mode 100644
index 0000000..b483f1b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go
@@ -0,0 +1,106 @@
+// +build linux
+
+package cgroups
+
+type ThrottlingData struct {
+ // Number of periods with throttling active
+ Periods uint64 `json:"periods,omitempty"`
+ // Number of periods when the container hit its throttling limit.
+ ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
+ // Aggregate time the container was throttled for in nanoseconds.
+ ThrottledTime uint64 `json:"throttled_time,omitempty"`
+}
+
+// CpuUsage denotes the usage of a CPU.
+// All CPU stats are aggregate since container inception.
+type CpuUsage struct {
+ // Total CPU time consumed.
+ // Units: nanoseconds.
+ TotalUsage uint64 `json:"total_usage,omitempty"`
+ // Total CPU time consumed per core.
+ // Units: nanoseconds.
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+ // Time spent by tasks of the cgroup in kernel mode.
+ // Units: nanoseconds.
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+ // Time spent by tasks of the cgroup in user mode.
+ // Units: nanoseconds.
+ UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+type CpuStats struct {
+ CpuUsage CpuUsage `json:"cpu_usage,omitempty"`
+ ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+type MemoryData struct {
+ Usage uint64 `json:"usage,omitempty"`
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+ Failcnt uint64 `json:"failcnt"`
+ Limit uint64 `json:"limit"`
+}
+
+type MemoryStats struct {
+ // memory used for cache
+ Cache uint64 `json:"cache,omitempty"`
+ // usage of memory
+ Usage MemoryData `json:"usage,omitempty"`
+ // usage of memory + swap
+ SwapUsage MemoryData `json:"swap_usage,omitempty"`
+ // usage of kernel memory
+ KernelUsage MemoryData `json:"kernel_usage,omitempty"`
+ // usage of kernel TCP memory
+ KernelTCPUsage MemoryData `json:"kernel_tcp_usage,omitempty"`
+
+ Stats map[string]uint64 `json:"stats,omitempty"`
+}
+
+type PidsStats struct {
+ // number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+ // active pids hard limit
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+type BlkioStatEntry struct {
+ Major uint64 `json:"major,omitempty"`
+ Minor uint64 `json:"minor,omitempty"`
+ Op string `json:"op,omitempty"`
+ Value uint64 `json:"value,omitempty"`
+}
+
+type BlkioStats struct {
+ // number of bytes transferred to and from the block device
+ IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"`
+ IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"`
+ IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"`
+ IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"`
+ IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"`
+ IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"`
+ IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"`
+ SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"`
+}
+
+type HugetlbStats struct {
+ // current res_counter usage for hugetlb
+ Usage uint64 `json:"usage,omitempty"`
+ // maximum usage ever recorded.
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+ // number of times hugetlb usage allocation failed.
+ Failcnt uint64 `json:"failcnt"`
+}
+
+type Stats struct {
+ CpuStats CpuStats `json:"cpu_stats,omitempty"`
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+ // the map is in the format "size of hugepage: stats of the hugepage"
+ HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"`
+}
+
+func NewStats() *Stats {
+ memoryStats := MemoryStats{Stats: make(map[string]uint64)}
+ hugetlbStats := make(map[string]HugetlbStats)
+ return &Stats{MemoryStats: memoryStats, HugetlbStats: hugetlbStats}
+}
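NewStats pre-allocates only the maps that subsystems index by key (MemoryStats.Stats and HugetlbStats); everything else starts as a zero value and is filled in by the individual GetStats implementations. A minimal, Linux-only sketch of populating a Stats value by hand; the numbers are made up:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/cgroups"
)

func main() {
	stats := cgroups.NewStats()

	// A GetStats implementation would read these from the cgroup files.
	stats.MemoryStats.Stats["cache"] = 4096
	stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"]
	stats.PidsStats.Current = 12
	stats.HugetlbStats["2MB"] = cgroups.HugetlbStats{Usage: 2 << 20}

	fmt.Printf("cache=%d pids=%d hugetlb(2MB)=%d\n",
		stats.MemoryStats.Cache, stats.PidsStats.Current,
		stats.HugetlbStats["2MB"].Usage)
}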
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
new file mode 100644
index 0000000..1a7c4e1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
@@ -0,0 +1,413 @@
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/docker/go-units"
+)
+
+const cgroupNamePrefix = "name="
+
+// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt
+func FindCgroupMountpoint(subsystem string) (string, error) {
+ // We are not using mount.GetMounts() because it's super-inefficient;
+ // parsing mountinfo directly is roughly 10x faster because it avoids Sscanf.
+ // It was one of two major performance drawbacks in container start.
+ if !isSubsystemAvailable(subsystem) {
+ return "", NewNotFoundError(subsystem)
+ }
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ txt := scanner.Text()
+ fields := strings.Split(txt, " ")
+ for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+ if opt == subsystem {
+ return fields[4], nil
+ }
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+
+ return "", NewNotFoundError(subsystem)
+}
+
+func FindCgroupMountpointAndRoot(subsystem string) (string, string, error) {
+ if !isSubsystemAvailable(subsystem) {
+ return "", "", NewNotFoundError(subsystem)
+ }
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", "", err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ txt := scanner.Text()
+ fields := strings.Split(txt, " ")
+ for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+ if opt == subsystem {
+ return fields[4], fields[3], nil
+ }
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return "", "", err
+ }
+
+ return "", "", NewNotFoundError(subsystem)
+}
+
+func isSubsystemAvailable(subsystem string) bool {
+ cgroups, err := ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return false
+ }
+ _, avail := cgroups[subsystem]
+ return avail
+}
+
+func FindCgroupMountpointDir() (string, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ text := scanner.Text()
+ fields := strings.Split(text, " ")
+ // Safe as mountinfo encodes mountpoints with spaces as \040.
+ index := strings.Index(text, " - ")
+ postSeparatorFields := strings.Fields(text[index+3:])
+ numPostFields := len(postSeparatorFields)
+
+ // This is an error as we can't detect if the mount is for "cgroup"
+ if numPostFields == 0 {
+ return "", fmt.Errorf("Found no fields post '-' in %q", text)
+ }
+
+ if postSeparatorFields[0] == "cgroup" {
+ // Check that the mount is properly formatted.
+ if numPostFields < 3 {
+ return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+ }
+
+ return filepath.Dir(fields[4]), nil
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+
+ return "", NewNotFoundError("cgroup")
+}
+
+type Mount struct {
+ Mountpoint string
+ Root string
+ Subsystems []string
+}
+
+func (m Mount) GetThisCgroupDir(cgroups map[string]string) (string, error) {
+ if len(m.Subsystems) == 0 {
+ return "", fmt.Errorf("no subsystem for mount")
+ }
+
+ return getControllerPath(m.Subsystems[0], cgroups)
+}
+
+func getCgroupMountsHelper(ss map[string]bool, mi io.Reader) ([]Mount, error) {
+ res := make([]Mount, 0, len(ss))
+ scanner := bufio.NewScanner(mi)
+ numFound := 0
+ for scanner.Scan() && numFound < len(ss) {
+ txt := scanner.Text()
+ sepIdx := strings.Index(txt, " - ")
+ if sepIdx == -1 {
+ return nil, fmt.Errorf("invalid mountinfo format")
+ }
+ if txt[sepIdx+3:sepIdx+9] != "cgroup" {
+ continue
+ }
+ fields := strings.Split(txt, " ")
+ m := Mount{
+ Mountpoint: fields[4],
+ Root: fields[3],
+ }
+ for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+ if !ss[opt] {
+ continue
+ }
+ if strings.HasPrefix(opt, cgroupNamePrefix) {
+ m.Subsystems = append(m.Subsystems, opt[len(cgroupNamePrefix):])
+ } else {
+ m.Subsystems = append(m.Subsystems, opt)
+ }
+ numFound++
+ }
+ res = append(res, m)
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+func GetCgroupMounts() ([]Mount, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ all, err := ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return nil, err
+ }
+
+ allMap := make(map[string]bool)
+ for s := range all {
+ allMap[s] = true
+ }
+ return getCgroupMountsHelper(allMap, f)
+}
+
+// GetAllSubsystems returns all the cgroup subsystems supported by the kernel
+func GetAllSubsystems() ([]string, error) {
+ f, err := os.Open("/proc/cgroups")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ subsystems := []string{}
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ text := s.Text()
+ if text[0] != '#' {
+ parts := strings.Fields(text)
+ if len(parts) >= 4 && parts[3] != "0" {
+ subsystems = append(subsystems, parts[0])
+ }
+ }
+ }
+ return subsystems, nil
+}
+
+ // GetThisCgroupDir returns the relative path to the cgroup the calling process is running in.
+func GetThisCgroupDir(subsystem string) (string, error) {
+ cgroups, err := ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return "", err
+ }
+
+ return getControllerPath(subsystem, cgroups)
+}
+
+func GetInitCgroupDir(subsystem string) (string, error) {
+
+ cgroups, err := ParseCgroupFile("/proc/1/cgroup")
+ if err != nil {
+ return "", err
+ }
+
+ return getControllerPath(subsystem, cgroups)
+}
+
+func readProcsFile(dir string) ([]int, error) {
+ f, err := os.Open(filepath.Join(dir, "cgroup.procs"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var (
+ s = bufio.NewScanner(f)
+ out = []int{}
+ )
+
+ for s.Scan() {
+ if t := s.Text(); t != "" {
+ pid, err := strconv.Atoi(t)
+ if err != nil {
+ return nil, err
+ }
+ out = append(out, pid)
+ }
+ }
+ return out, nil
+}
+
+// ParseCgroupFile parses the given cgroup file, typically from
+// /proc/<pid>/cgroup, into a map of subgroups to cgroup names.
+func ParseCgroupFile(path string) (map[string]string, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseCgroupFromReader(f)
+}
+
+// helper function for ParseCgroupFile to make testing easier
+func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
+ s := bufio.NewScanner(r)
+ cgroups := make(map[string]string)
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ text := s.Text()
+ // from cgroups(7):
+ // /proc/[pid]/cgroup
+ // ...
+ // For each cgroup hierarchy ... there is one entry
+ // containing three colon-separated fields of the form:
+ // hierarchy-ID:subsystem-list:cgroup-path
+ parts := strings.SplitN(text, ":", 3)
+ if len(parts) < 3 {
+ return nil, fmt.Errorf("invalid cgroup entry: must contain at least two colons: %v", text)
+ }
+
+ for _, subs := range strings.Split(parts[1], ",") {
+ cgroups[subs] = parts[2]
+ }
+ }
+ return cgroups, nil
+}
+
+func getControllerPath(subsystem string, cgroups map[string]string) (string, error) {
+
+ if p, ok := cgroups[subsystem]; ok {
+ return p, nil
+ }
+
+ if p, ok := cgroups[cgroupNamePrefix+subsystem]; ok {
+ return p, nil
+ }
+
+ return "", NewNotFoundError(subsystem)
+}
+
+func PathExists(path string) bool {
+ if _, err := os.Stat(path); err != nil {
+ return false
+ }
+ return true
+}
+
+func EnterPid(cgroupPaths map[string]string, pid int) error {
+ for _, path := range cgroupPaths {
+ if PathExists(path) {
+ if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"),
+ []byte(strconv.Itoa(pid)), 0700); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+ // RemovePaths iterates over the provided paths, removing them.
+ // It tries to remove each path up to five times, with an increasing delay
+ // between attempts. If any cgroups still cannot be removed after that,
+ // an appropriate error is returned.
+func RemovePaths(paths map[string]string) (err error) {
+ delay := 10 * time.Millisecond
+ for i := 0; i < 5; i++ {
+ if i != 0 {
+ time.Sleep(delay)
+ delay *= 2
+ }
+ for s, p := range paths {
+ os.RemoveAll(p)
+ // TODO: this should probably be logged
+ _, err := os.Stat(p)
+ // We need this strange way of checking cgroups existence because
+ // RemoveAll almost always returns an error, even on already removed
+ // cgroups.
+ if os.IsNotExist(err) {
+ delete(paths, s)
+ }
+ }
+ if len(paths) == 0 {
+ return nil
+ }
+ }
+ return fmt.Errorf("Failed to remove paths: %v", paths)
+}
+
+func GetHugePageSize() ([]string, error) {
+ var pageSizes []string
+ sizeList := []string{"B", "kB", "MB", "GB", "TB", "PB"}
+ files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages")
+ if err != nil {
+ return pageSizes, err
+ }
+ for _, st := range files {
+ nameArray := strings.Split(st.Name(), "-")
+ pageSize, err := units.RAMInBytes(nameArray[1])
+ if err != nil {
+ return []string{}, err
+ }
+ sizeString := units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList)
+ pageSizes = append(pageSizes, sizeString)
+ }
+
+ return pageSizes, nil
+}
+
+ // GetPids returns all pids that were added to the cgroup at path.
+func GetPids(path string) ([]int, error) {
+ return readProcsFile(path)
+}
+
+ // GetAllPids returns all pids that were added to the cgroup at path and to all of its
+ // subcgroups.
+func GetAllPids(path string) ([]int, error) {
+ var pids []int
+ // collect pids from all sub-cgroups
+ err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error {
+ dir, file := filepath.Split(p)
+ if file != "cgroup.procs" {
+ return nil
+ }
+ if iErr != nil {
+ return iErr
+ }
+ cPids, err := readProcsFile(dir)
+ if err != nil {
+ return err
+ }
+ pids = append(pids, cPids...)
+ return nil
+ })
+ return pids, err
+}
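ParseCgroupFile turns each "hierarchy-ID:subsystem-list:cgroup-path" line of /proc/[pid]/cgroup into a subsystem-to-path map, which the mountpoint helpers above then combine with /proc/self/mountinfo. A reduced sketch of the parsing step applied to a literal sample instead of the real file:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Two hypothetical lines in the /proc/[pid]/cgroup format described in cgroups(7).
	sample := "4:memory:/docker/abc123\n2:cpu,cpuacct:/docker/abc123\n"

	cgroupPaths := make(map[string]string)
	for _, line := range strings.Split(strings.TrimSpace(sample), "\n") {
		parts := strings.SplitN(line, ":", 3)
		if len(parts) < 3 {
			continue // the vendored parser returns an error here
		}
		for _, subsystem := range strings.Split(parts[1], ",") {
			cgroupPaths[subsystem] = parts[2]
		}
	}

	fmt.Println(cgroupPaths["memory"]) // /docker/abc123
	fmt.Println(cgroupPaths["cpu"])    // /docker/abc123
}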
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
new file mode 100644
index 0000000..e0f3ca1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
@@ -0,0 +1,61 @@
+package configs
+
+import "fmt"
+
+// blockIODevice holds major:minor format supported in blkio cgroup
+type blockIODevice struct {
+ // Major is the device's major number
+ Major int64 `json:"major"`
+ // Minor is the device's minor number
+ Minor int64 `json:"minor"`
+}
+
+// WeightDevice struct holds a `major:minor weight`|`major:minor leaf_weight` pair
+type WeightDevice struct {
+ blockIODevice
+ // Weight is the bandwidth rate for the device, range is from 10 to 1000
+ Weight uint16 `json:"weight"`
+ // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
+ LeafWeight uint16 `json:"leafWeight"`
+}
+
+// NewWeightDevice returns a configured WeightDevice pointer
+func NewWeightDevice(major, minor int64, weight, leafWeight uint16) *WeightDevice {
+ wd := &WeightDevice{}
+ wd.Major = major
+ wd.Minor = minor
+ wd.Weight = weight
+ wd.LeafWeight = leafWeight
+ return wd
+}
+
+// WeightString formats the struct to be writable to the cgroup specific file
+func (wd *WeightDevice) WeightString() string {
+ return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight)
+}
+
+// LeafWeightString formats the struct to be writable to the cgroup specific file
+func (wd *WeightDevice) LeafWeightString() string {
+ return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight)
+}
+
+// ThrottleDevice struct holds a `major:minor rate_per_second` pair
+type ThrottleDevice struct {
+ blockIODevice
+ // Rate is the IO rate limit per cgroup per device
+ Rate uint64 `json:"rate"`
+}
+
+// NewThrottleDevice returns a configured ThrottleDevice pointer
+func NewThrottleDevice(major, minor int64, rate uint64) *ThrottleDevice {
+ td := &ThrottleDevice{}
+ td.Major = major
+ td.Minor = minor
+ td.Rate = rate
+ return td
+}
+
+// String formats the struct to be writable to the cgroup specific file
+func (td *ThrottleDevice) String() string {
+ return fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate)
+}
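All three formatting helpers produce the "major:minor value" form that the blkio cgroup files expect. A short usage sketch with made-up device numbers:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	wd := configs.NewWeightDevice(8, 0, 500, 300)
	td := configs.NewThrottleDevice(8, 16, 1048576)

	fmt.Println(wd.WeightString())     // 8:0 500
	fmt.Println(wd.LeafWeightString()) // 8:0 300
	fmt.Println(td.String())           // 8:16 1048576
}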
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go
new file mode 100644
index 0000000..f2eff91
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go
@@ -0,0 +1,124 @@
+// +build linux freebsd
+
+package configs
+
+type FreezerState string
+
+const (
+ Undefined FreezerState = ""
+ Frozen FreezerState = "FROZEN"
+ Thawed FreezerState = "THAWED"
+)
+
+type Cgroup struct {
+ // Deprecated, use Path instead
+ Name string `json:"name,omitempty"`
+
+ // name of parent of cgroup or slice
+ // Deprecated, use Path instead
+ Parent string `json:"parent,omitempty"`
+
+ // Path specifies the path to cgroups that are created and/or joined by the container.
+ // The path is assumed to be relative to the host system cgroup mountpoint.
+ Path string `json:"path"`
+
+ // ScopePrefix describes the prefix for the scope name
+ ScopePrefix string `json:"scope_prefix"`
+
+ // Paths represent the absolute cgroups paths to join.
+ // This takes precedence over Path.
+ Paths map[string]string
+
+ // Resources contains various cgroups settings to apply
+ *Resources
+}
+
+type Resources struct {
+ // If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list.
+ // Deprecated
+ AllowAllDevices bool `json:"allow_all_devices,omitempty"`
+ // Deprecated
+ AllowedDevices []*Device `json:"allowed_devices,omitempty"`
+ // Deprecated
+ DeniedDevices []*Device `json:"denied_devices,omitempty"`
+
+ Devices []*Device `json:"devices"`
+
+ // Memory limit (in bytes)
+ Memory int64 `json:"memory"`
+
+ // Memory reservation or soft_limit (in bytes)
+ MemoryReservation int64 `json:"memory_reservation"`
+
+ // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+ MemorySwap int64 `json:"memory_swap"`
+
+ // Kernel memory limit (in bytes)
+ KernelMemory int64 `json:"kernel_memory"`
+
+ // Kernel memory limit for TCP use (in bytes)
+ KernelMemoryTCP int64 `json:"kernel_memory_tcp"`
+
+ // CPU shares (relative weight vs. other containers)
+ CpuShares int64 `json:"cpu_shares"`
+
+ // CPU hardcap limit (in usecs). Allowed cpu time in a given period.
+ CpuQuota int64 `json:"cpu_quota"`
+
+ // CPU period to be used for hardcapping (in usecs). 0 to use system default.
+ CpuPeriod int64 `json:"cpu_period"`
+
+ // How much CPU time can be used in realtime scheduling (in usecs).
+ CpuRtRuntime int64 `json:"cpu_rt_quota"`
+
+ // CPU period to be used for realtime scheduling (in usecs).
+ CpuRtPeriod int64 `json:"cpu_rt_period"`
+
+ // CPU to use
+ CpusetCpus string `json:"cpuset_cpus"`
+
+ // MEM to use
+ CpusetMems string `json:"cpuset_mems"`
+
+ // Process limit; set to <= 0 to disable the limit.
+ PidsLimit int64 `json:"pids_limit"`
+
+ // Specifies per cgroup weight, range is from 10 to 1000.
+ BlkioWeight uint16 `json:"blkio_weight"`
+
+ // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
+ BlkioLeafWeight uint16 `json:"blkio_leaf_weight"`
+
+ // Weight per cgroup per device, can override BlkioWeight.
+ BlkioWeightDevice []*WeightDevice `json:"blkio_weight_device"`
+
+ // IO read rate limit per cgroup per device, bytes per second.
+ BlkioThrottleReadBpsDevice []*ThrottleDevice `json:"blkio_throttle_read_bps_device"`
+
+ // IO write rate limit per cgroup per device, bytes per second.
+ BlkioThrottleWriteBpsDevice []*ThrottleDevice `json:"blkio_throttle_write_bps_device"`
+
+ // IO read rate limit per cgroup per device, IO per second.
+ BlkioThrottleReadIOPSDevice []*ThrottleDevice `json:"blkio_throttle_read_iops_device"`
+
+ // IO write rate limit per cgroup per device, IO per second.
+ BlkioThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkio_throttle_write_iops_device"`
+
+ // set the freeze value for the process
+ Freezer FreezerState `json:"freezer"`
+
+ // Hugetlb limit (in bytes)
+ HugetlbLimit []*HugepageLimit `json:"hugetlb_limit"`
+
+ // Whether to disable OOM Killer
+ OomKillDisable bool `json:"oom_kill_disable"`
+
+ // Tuning swappiness behaviour per cgroup
+ MemorySwappiness *int64 `json:"memory_swappiness"`
+
+ // Set priority of network traffic for container
+ NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"`
+
+ // Set class identifier for container's network packets
+ NetClsClassid string `json:"net_cls_classid"`
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go
new file mode 100644
index 0000000..95e2830
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go
@@ -0,0 +1,6 @@
+// +build !windows,!linux,!freebsd
+
+package configs
+
+type Cgroup struct {
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go
new file mode 100644
index 0000000..d74847b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go
@@ -0,0 +1,6 @@
+package configs
+
+// TODO Windows: This can ultimately be entirely factored out on Windows as
+// cgroups are a Unix-specific construct.
+type Cgroup struct {
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
new file mode 100644
index 0000000..806e0be
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
@@ -0,0 +1,332 @@
+package configs
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+)
+
+type Rlimit struct {
+ Type int `json:"type"`
+ Hard uint64 `json:"hard"`
+ Soft uint64 `json:"soft"`
+}
+
+// IDMap represents UID/GID Mappings for User Namespaces.
+type IDMap struct {
+ ContainerID int `json:"container_id"`
+ HostID int `json:"host_id"`
+ Size int `json:"size"`
+}
+
+// Seccomp represents syscall restrictions
+// By default, only the native architecture of the kernel is allowed to be used
+// for syscalls. Additional architectures can be added by specifying them in
+// Architectures.
+type Seccomp struct {
+ DefaultAction Action `json:"default_action"`
+ Architectures []string `json:"architectures"`
+ Syscalls []*Syscall `json:"syscalls"`
+}
+
+// Action is taken upon rule match in Seccomp
+type Action int
+
+const (
+ Kill Action = iota + 1
+ Errno
+ Trap
+ Allow
+ Trace
+)
+
+// Operator is a comparison operator to be used when matching syscall arguments in Seccomp
+type Operator int
+
+const (
+ EqualTo Operator = iota + 1
+ NotEqualTo
+ GreaterThan
+ GreaterThanOrEqualTo
+ LessThan
+ LessThanOrEqualTo
+ MaskEqualTo
+)
+
+// Arg is a rule to match a specific syscall argument in Seccomp
+type Arg struct {
+ Index uint `json:"index"`
+ Value uint64 `json:"value"`
+ ValueTwo uint64 `json:"value_two"`
+ Op Operator `json:"op"`
+}
+
+// Syscall is a rule to match a syscall in Seccomp
+type Syscall struct {
+ Name string `json:"name"`
+ Action Action `json:"action"`
+ Args []*Arg `json:"args"`
+}
+
+// TODO Windows. Many of these fields should be factored out into those parts
+// which are common across platforms, and those which are platform specific.
+
+// Config defines configuration options for executing a process inside a contained environment.
+type Config struct {
+ // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs
+ // This is a common option when the container is running in ramdisk
+ NoPivotRoot bool `json:"no_pivot_root"`
+
+ // ParentDeathSignal specifies the signal that is sent to the container's process in the case
+ // that the parent process dies.
+ ParentDeathSignal int `json:"parent_death_signal"`
+
+ // PivotDir allows a custom directory inside the container's root filesystem to be used as pivot, when NoPivotRoot is not set.
+ // When a custom PivotDir is not set, a temporary dir inside the root filesystem will be used. The pivot dir needs to be writable.
+ // This is required when using read-only root filesystems. In these cases, a read/writable path can be (bind) mounted somewhere inside the root filesystem to act as pivot.
+ PivotDir string `json:"pivot_dir"`
+
+ // Path to a directory containing the container's root filesystem.
+ Rootfs string `json:"rootfs"`
+
+ // Readonlyfs will remount the container's rootfs as readonly where only externally mounted
+ // bind mounts are writable.
+ Readonlyfs bool `json:"readonlyfs"`
+
+ // Specifies the mount propagation flags to be applied to /.
+ RootPropagation int `json:"rootPropagation"`
+
+ // Mounts specify additional source and destination paths that will be mounted inside the container's
+ // rootfs and mount namespace if specified
+ Mounts []*Mount `json:"mounts"`
+
+ // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!
+ Devices []*Device `json:"devices"`
+
+ MountLabel string `json:"mount_label"`
+
+ // Hostname optionally sets the container's hostname if provided
+ Hostname string `json:"hostname"`
+
+ // Namespaces specifies the container's namespaces that it should setup when cloning the init process
+ // If a namespace is not provided that namespace is shared from the container's parent process
+ Namespaces Namespaces `json:"namespaces"`
+
+ // Capabilities specify the capabilities to keep when executing the process inside the container
+ // All capabilities not specified will be dropped from the process's capability mask
+ Capabilities []string `json:"capabilities"`
+
+ // Networks specifies the container's network setup to be created
+ Networks []*Network `json:"networks"`
+
+ // Routes can be specified to create entries in the route table as the container is started
+ Routes []*Route `json:"routes"`
+
+ // Cgroups specifies specific cgroup settings for the various subsystems that the container is
+ // placed into to limit the resources the container has available
+ Cgroups *Cgroup `json:"cgroups"`
+
+ // AppArmorProfile specifies the profile to apply to the process running in the container and is
+ // changed at the time the process is execed
+ AppArmorProfile string `json:"apparmor_profile,omitempty"`
+
+ // ProcessLabel specifies the label to apply to the process running in the container. It is
+ // commonly used by selinux
+ ProcessLabel string `json:"process_label,omitempty"`
+
+ // Rlimits specifies the resource limits, such as max open files, to set in the container
+ // If Rlimits are not set, the container will inherit rlimits from the parent process
+ Rlimits []Rlimit `json:"rlimits,omitempty"`
+
+ // OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores
+ // for a process. Valid values are in the range [-1000, 1000], where processes with
+ // higher scores are preferred for being killed.
+ // More information about kernel oom score calculation here: https://lwn.net/Articles/317814/
+ OomScoreAdj int `json:"oom_score_adj"`
+
+ // AdditionalGroups specifies the gids that should be added to supplementary groups
+ // in addition to those that the user belongs to.
+ AdditionalGroups []string `json:"additional_groups"`
+
+ // UidMappings is an array of User ID mappings for User Namespaces
+ UidMappings []IDMap `json:"uid_mappings"`
+
+ // GidMappings is an array of Group ID mappings for User Namespaces
+ GidMappings []IDMap `json:"gid_mappings"`
+
+ // MaskPaths specifies paths within the container's rootfs to mask over with a bind
+ // mount pointing to /dev/null so as to prevent reads of the file.
+ MaskPaths []string `json:"mask_paths"`
+
+ // ReadonlyPaths specifies paths within the container's rootfs to remount as read-only
+ // so that writes to these paths are rejected.
+ ReadonlyPaths []string `json:"readonly_paths"`
+
+ // Sysctl is a map of properties and their values. It is the equivalent of using
+ // sysctl -w my.property.name value in Linux.
+ Sysctl map[string]string `json:"sysctl"`
+
+ // Seccomp allows actions to be taken whenever a syscall is made within the container.
+ // A number of rules are given, each having an action to be taken if a syscall matches it.
+ // A default action to be taken if no rules match is also given.
+ Seccomp *Seccomp `json:"seccomp"`
+
+ // NoNewPrivileges controls whether processes in the container can gain additional privileges.
+ NoNewPrivileges bool `json:"no_new_privileges,omitempty"`
+
+ // Hooks are a collection of actions to perform at various container lifecycle events.
+ // CommandHooks are serialized to JSON, but other hooks are not.
+ Hooks *Hooks
+
+ // Version is the version of opencontainer specification that is supported.
+ Version string `json:"version"`
+
+ // Labels are user defined metadata that is stored in the config and populated on the state
+ Labels []string `json:"labels"`
+
+ // NoNewKeyring, if set, will not allocate a new session keyring for the container. It will use the
+ // caller's keyring in this case.
+ NoNewKeyring bool `json:"no_new_keyring"`
+}
+
+type Hooks struct {
+ // Prestart commands are executed after the container namespaces are created,
+ // but before the user supplied command is executed from init.
+ Prestart []Hook
+
+ // Poststart commands are executed after the container init process starts.
+ Poststart []Hook
+
+ // Poststop commands are executed after the container init process exits.
+ Poststop []Hook
+}
+
+func (hooks *Hooks) UnmarshalJSON(b []byte) error {
+ var state struct {
+ Prestart []CommandHook
+ Poststart []CommandHook
+ Poststop []CommandHook
+ }
+
+ if err := json.Unmarshal(b, &state); err != nil {
+ return err
+ }
+
+ deserialize := func(shooks []CommandHook) (hooks []Hook) {
+ for _, shook := range shooks {
+ hooks = append(hooks, shook)
+ }
+
+ return hooks
+ }
+
+ hooks.Prestart = deserialize(state.Prestart)
+ hooks.Poststart = deserialize(state.Poststart)
+ hooks.Poststop = deserialize(state.Poststop)
+ return nil
+}
+
+func (hooks Hooks) MarshalJSON() ([]byte, error) {
+ serialize := func(hooks []Hook) (serializableHooks []CommandHook) {
+ for _, hook := range hooks {
+ switch chook := hook.(type) {
+ case CommandHook:
+ serializableHooks = append(serializableHooks, chook)
+ default:
+ logrus.Warnf("cannot serialize hook of type %T, skipping", hook)
+ }
+ }
+
+ return serializableHooks
+ }
+
+ return json.Marshal(map[string]interface{}{
+ "prestart": serialize(hooks.Prestart),
+ "poststart": serialize(hooks.Poststart),
+ "poststop": serialize(hooks.Poststop),
+ })
+}
+
+// HookState is the payload provided to a hook on execution.
+type HookState struct {
+ Version string `json:"ociVersion"`
+ ID string `json:"id"`
+ Pid int `json:"pid"`
+ Root string `json:"root"`
+ BundlePath string `json:"bundlePath"`
+}
+
+type Hook interface {
+ // Run executes the hook with the provided state.
+ Run(HookState) error
+}
+
+// NewFunctionHook will call the provided function when the hook is run.
+func NewFunctionHook(f func(HookState) error) FuncHook {
+ return FuncHook{
+ run: f,
+ }
+}
+
+type FuncHook struct {
+ run func(HookState) error
+}
+
+func (f FuncHook) Run(s HookState) error {
+ return f.run(s)
+}
+
+type Command struct {
+ Path string `json:"path"`
+ Args []string `json:"args"`
+ Env []string `json:"env"`
+ Dir string `json:"dir"`
+ Timeout *time.Duration `json:"timeout"`
+}
+
+// NewCommandHook will execute the provided command when the hook is run.
+func NewCommandHook(cmd Command) CommandHook {
+ return CommandHook{
+ Command: cmd,
+ }
+}
+
+type CommandHook struct {
+ Command
+}
+
+func (c Command) Run(s HookState) error {
+ b, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ cmd := exec.Cmd{
+ Path: c.Path,
+ Args: c.Args,
+ Env: c.Env,
+ Stdin: bytes.NewReader(b),
+ }
+ errC := make(chan error, 1)
+ go func() {
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ err = fmt.Errorf("%s: %s", err, out)
+ }
+ errC <- err
+ }()
+ if c.Timeout != nil {
+ select {
+ case err := <-errC:
+ return err
+ case <-time.After(*c.Timeout):
+ cmd.Process.Kill()
+ cmd.Wait()
+ return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds())
+ }
+ }
+ return <-errC
+}
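CommandHook embeds Command, so the Run method above executes the hook binary with the HookState JSON on stdin and enforces Timeout if one is set. A minimal sketch of wiring a command hook into a Config; the binary path, arguments, and rootfs are hypothetical:

package main

import (
	"time"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	timeout := 5 * time.Second

	hook := configs.NewCommandHook(configs.Command{
		// Hypothetical network setup helper; any executable path would do.
		Path:    "/usr/local/bin/netns-setup",
		Args:    []string{"netns-setup", "--bridge", "br0"},
		Env:     []string{"PATH=/usr/bin:/bin"},
		Timeout: &timeout,
	})

	cfg := &configs.Config{
		Rootfs: "/var/lib/containers/demo/rootfs",
		Hooks: &configs.Hooks{
			Prestart: []configs.Hook{hook},
		},
	}
	_ = cfg // the library runs the hook with a HookState when the container starts
}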
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go
new file mode 100644
index 0000000..a60554a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go
@@ -0,0 +1,51 @@
+// +build freebsd linux
+
+package configs
+
+import "fmt"
+
+// HostUID gets the root uid for the process on host which could be non-zero
+// when user namespaces are enabled.
+func (c Config) HostUID() (int, error) {
+ if c.Namespaces.Contains(NEWUSER) {
+ if c.UidMappings == nil {
+ return -1, fmt.Errorf("User namespaces enabled, but no user mappings found.")
+ }
+ id, found := c.hostIDFromMapping(0, c.UidMappings)
+ if !found {
+ return -1, fmt.Errorf("User namespaces enabled, but no root user mapping found.")
+ }
+ return id, nil
+ }
+ // Return default root uid 0
+ return 0, nil
+}
+
+// HostGID gets the root gid for the process on host which could be non-zero
+// when user namespaces are enabled.
+func (c Config) HostGID() (int, error) {
+ if c.Namespaces.Contains(NEWUSER) {
+ if c.GidMappings == nil {
+ return -1, fmt.Errorf("User namespaces enabled, but no gid mappings found.")
+ }
+ id, found := c.hostIDFromMapping(0, c.GidMappings)
+ if !found {
+ return -1, fmt.Errorf("User namespaces enabled, but no root group mapping found.")
+ }
+ return id, nil
+ }
+ // Return default root gid 0
+ return 0, nil
+}
+
+// Utility function that gets a host ID for a container ID from user namespace map
+// if that ID is present in the map.
+func (c Config) hostIDFromMapping(containerID int, uMap []IDMap) (int, bool) {
+ for _, m := range uMap {
+ if (containerID >= m.ContainerID) && (containerID <= (m.ContainerID + m.Size - 1)) {
+ hostID := m.HostID + (containerID - m.ContainerID)
+ return hostID, true
+ }
+ }
+ return -1, false
+}
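hostIDFromMapping is plain interval arithmetic: if the container ID falls inside [ContainerID, ContainerID+Size), the host ID is HostID plus the offset into that interval. A self-contained sketch with a hypothetical user namespace mapping:

package main

import "fmt"

type idMap struct {
	ContainerID, HostID, Size int
}

// hostID mirrors Config.hostIDFromMapping above.
func hostID(containerID int, uMap []idMap) (int, bool) {
	for _, m := range uMap {
		if containerID >= m.ContainerID && containerID <= m.ContainerID+m.Size-1 {
			return m.HostID + (containerID - m.ContainerID), true
		}
	}
	return -1, false
}

func main() {
	// Hypothetical mapping: container UIDs 0-65535 map to host UIDs starting at 100000.
	mapping := []idMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	id, ok := hostID(0, mapping)
	fmt.Println(id, ok) // 100000 true: container root is host UID 100000
}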
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go
new file mode 100644
index 0000000..8701bb2
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go
@@ -0,0 +1,57 @@
+package configs
+
+import (
+ "fmt"
+ "os"
+)
+
+const (
+ Wildcard = -1
+)
+
+// TODO Windows: This can be factored out in the future
+
+type Device struct {
+ // Device type, block, char, etc.
+ Type rune `json:"type"`
+
+ // Path to the device.
+ Path string `json:"path"`
+
+ // Major is the device's major number.
+ Major int64 `json:"major"`
+
+ // Minor is the device's minor number.
+ Minor int64 `json:"minor"`
+
+ // Cgroup permissions format, rwm.
+ Permissions string `json:"permissions"`
+
+ // FileMode permission bits for the device.
+ FileMode os.FileMode `json:"file_mode"`
+
+ // Uid of the device.
+ Uid uint32 `json:"uid"`
+
+ // Gid of the device.
+ Gid uint32 `json:"gid"`
+
+ // Allow controls whether this device is written to the cgroup's allowed list
+ Allow bool `json:"allow"`
+}
+
+func (d *Device) CgroupString() string {
+ return fmt.Sprintf("%c %s:%s %s", d.Type, deviceNumberString(d.Major), deviceNumberString(d.Minor), d.Permissions)
+}
+
+func (d *Device) Mkdev() int {
+ return int((d.Major << 8) | (d.Minor & 0xff) | ((d.Minor & 0xfff00) << 12))
+}
+
+ // deviceNumberString converts the device number to its string representation.
+func deviceNumberString(number int64) string {
+ if number == Wildcard {
+ return "*"
+ }
+ return fmt.Sprint(number)
+}
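CgroupString produces the "type major:minor permissions" line that the devices cgroup files expect, with Wildcard rendered as "*". A short sketch using made-up device entries:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	null := &configs.Device{
		Type:        'c',
		Major:       1,
		Minor:       3,
		Permissions: "rwm",
	}
	anyBlock := &configs.Device{
		Type:        'b',
		Major:       configs.Wildcard,
		Minor:       configs.Wildcard,
		Permissions: "m",
	}

	fmt.Println(null.CgroupString())     // c 1:3 rwm
	fmt.Println(anyBlock.CgroupString()) // b *:* m
}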
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go
new file mode 100644
index 0000000..ba1f437
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go
@@ -0,0 +1,125 @@
+// +build linux freebsd
+
+package configs
+
+var (
+ // DefaultSimpleDevices are devices that are to be both allowed and created.
+ DefaultSimpleDevices = []*Device{
+ // /dev/null and zero
+ {
+ Path: "/dev/null",
+ Type: 'c',
+ Major: 1,
+ Minor: 3,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+ {
+ Path: "/dev/zero",
+ Type: 'c',
+ Major: 1,
+ Minor: 5,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+
+ {
+ Path: "/dev/full",
+ Type: 'c',
+ Major: 1,
+ Minor: 7,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+
+ // consoles and ttys
+ {
+ Path: "/dev/tty",
+ Type: 'c',
+ Major: 5,
+ Minor: 0,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+
+ // /dev/urandom,/dev/random
+ {
+ Path: "/dev/urandom",
+ Type: 'c',
+ Major: 1,
+ Minor: 9,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+ {
+ Path: "/dev/random",
+ Type: 'c',
+ Major: 1,
+ Minor: 8,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+ }
+ DefaultAllowedDevices = append([]*Device{
+ // allow mknod for any device
+ {
+ Type: 'c',
+ Major: Wildcard,
+ Minor: Wildcard,
+ Permissions: "m",
+ },
+ {
+ Type: 'b',
+ Major: Wildcard,
+ Minor: Wildcard,
+ Permissions: "m",
+ },
+
+ {
+ Path: "/dev/console",
+ Type: 'c',
+ Major: 5,
+ Minor: 1,
+ Permissions: "rwm",
+ },
+ // /dev/pts/ - pts namespaces are "coming soon"
+ {
+ Path: "",
+ Type: 'c',
+ Major: 136,
+ Minor: Wildcard,
+ Permissions: "rwm",
+ },
+ {
+ Path: "",
+ Type: 'c',
+ Major: 5,
+ Minor: 2,
+ Permissions: "rwm",
+ },
+
+ // tuntap
+ {
+ Path: "",
+ Type: 'c',
+ Major: 10,
+ Minor: 200,
+ Permissions: "rwm",
+ },
+ }, DefaultSimpleDevices...)
+ DefaultAutoCreatedDevices = append([]*Device{
+ {
+ // /dev/fuse is created but not allowed.
+ // This is to allow Java to work, because Java
+ // insists on there being a /dev/fuse.
+ // https://github.com/docker/docker/issues/514
+ // https://github.com/docker/docker/issues/2393
+ //
+ Path: "/dev/fuse",
+ Type: 'c',
+ Major: 10,
+ Minor: 229,
+ Permissions: "rwm",
+ },
+ }, DefaultSimpleDevices...)
+)
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go
new file mode 100644
index 0000000..d302163
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go
@@ -0,0 +1,9 @@
+package configs
+
+type HugepageLimit struct {
+ // Pagesize is the hugepage size to limit.
+ Pagesize string `json:"page_size"`
+
+ // Limit is the usage limit for this hugepage size.
+ Limit uint64 `json:"limit"`
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go
new file mode 100644
index 0000000..9a0395e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go
@@ -0,0 +1,14 @@
+package configs
+
+import (
+ "fmt"
+)
+
+type IfPrioMap struct {
+ Interface string `json:"interface"`
+ Priority int64 `json:"priority"`
+}
+
+func (i *IfPrioMap) CgroupString() string {
+ return fmt.Sprintf("%s %d", i.Interface, i.Priority)
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go
new file mode 100644
index 0000000..cc770c9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go
@@ -0,0 +1,30 @@
+package configs
+
+type Mount struct {
+ // Source path for the mount.
+ Source string `json:"source"`
+
+ // Destination path for the mount inside the container.
+ Destination string `json:"destination"`
+
+ // Device the mount is for.
+ Device string `json:"device"`
+
+ // Mount flags.
+ Flags int `json:"flags"`
+
+ // Propagation Flags
+ PropagationFlags []int `json:"propagation_flags"`
+
+ // Mount data applied to the mount.
+ Data string `json:"data"`
+
+ // Relabel source if set, "z" indicates shared, "Z" indicates unshared.
+ Relabel string `json:"relabel"`
+
+ // Optional Command to be run before Source is mounted.
+ PremountCmds []Command `json:"premount_cmds"`
+
+ // Optional Command to be run after Source is mounted.
+ PostmountCmds []Command `json:"postmount_cmds"`
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go
new file mode 100644
index 0000000..a3329a3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go
@@ -0,0 +1,5 @@
+package configs
+
+type NamespaceType string
+
+type Namespaces []Namespace
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
new file mode 100644
index 0000000..fb4b852
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
@@ -0,0 +1,31 @@
+// +build linux
+
+package configs
+
+import "syscall"
+
+func (n *Namespace) Syscall() int {
+ return namespaceInfo[n.Type]
+}
+
+var namespaceInfo = map[NamespaceType]int{
+ NEWNET: syscall.CLONE_NEWNET,
+ NEWNS: syscall.CLONE_NEWNS,
+ NEWUSER: syscall.CLONE_NEWUSER,
+ NEWIPC: syscall.CLONE_NEWIPC,
+ NEWUTS: syscall.CLONE_NEWUTS,
+ NEWPID: syscall.CLONE_NEWPID,
+}
+
+// CloneFlags parses the container's Namespaces options to set the correct
+// flags on clone, unshare. This function returns flags only for new namespaces.
+func (n *Namespaces) CloneFlags() uintptr {
+ var flag int
+ for _, v := range *n {
+ if v.Path != "" {
+ continue
+ }
+ flag |= namespaceInfo[v.Type]
+ }
+ return uintptr(flag)
+}
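
Editor's note: a Linux-only sketch of CloneFlags, showing that namespaces with a Path (to be joined via setns) do not contribute clone flags; the pid in the path is made up.

    package main

    import (
        "fmt"
        "syscall"

        "github.com/opencontainers/runc/libcontainer/configs"
    )

    func main() {
        ns := configs.Namespaces{
            {Type: configs.NEWNET},                           // new namespace: contributes a flag
            {Type: configs.NEWNS, Path: "/proc/1234/ns/mnt"}, // joined via setns: skipped
        }
        flags := ns.CloneFlags()
        fmt.Println(flags&syscall.CLONE_NEWNET != 0) // true
        fmt.Println(flags&syscall.CLONE_NEWNS != 0)  // false
    }
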
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go
new file mode 100644
index 0000000..0547223
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux,!windows
+
+package configs
+
+func (n *Namespace) Syscall() int {
+ panic("No namespace syscall support")
+ return 0
+}
+
+// CloneFlags parses the container's Namespaces options to set the correct
+// flags on clone, unshare. This function returns flags only for new namespaces.
+func (n *Namespaces) CloneFlags() uintptr {
+ panic("No namespace syscall support")
+ return uintptr(0)
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go
new file mode 100644
index 0000000..b9c820d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go
@@ -0,0 +1,127 @@
+// +build linux freebsd
+
+package configs
+
+import (
+ "fmt"
+ "os"
+ "sync"
+)
+
+const (
+ NEWNET NamespaceType = "NEWNET"
+ NEWPID NamespaceType = "NEWPID"
+ NEWNS NamespaceType = "NEWNS"
+ NEWUTS NamespaceType = "NEWUTS"
+ NEWIPC NamespaceType = "NEWIPC"
+ NEWUSER NamespaceType = "NEWUSER"
+)
+
+var (
+ nsLock sync.Mutex
+ supportedNamespaces = make(map[NamespaceType]bool)
+)
+
+// nsToFile converts the namespace type to its filename
+func nsToFile(ns NamespaceType) string {
+ switch ns {
+ case NEWNET:
+ return "net"
+ case NEWNS:
+ return "mnt"
+ case NEWPID:
+ return "pid"
+ case NEWIPC:
+ return "ipc"
+ case NEWUSER:
+ return "user"
+ case NEWUTS:
+ return "uts"
+ }
+ return ""
+}
+
+// IsNamespaceSupported returns whether a namespace is available on
+// this host.
+func IsNamespaceSupported(ns NamespaceType) bool {
+ nsLock.Lock()
+ defer nsLock.Unlock()
+ supported, ok := supportedNamespaces[ns]
+ if ok {
+ return supported
+ }
+ nsFile := nsToFile(ns)
+ // if the namespace type is unknown, just return false
+ if nsFile == "" {
+ return false
+ }
+ _, err := os.Stat(fmt.Sprintf("/proc/self/ns/%s", nsFile))
+ // a namespace is supported if it exists and we have permissions to read it
+ supported = err == nil
+ supportedNamespaces[ns] = supported
+ return supported
+}
+
+func NamespaceTypes() []NamespaceType {
+ return []NamespaceType{
+ NEWNET,
+ NEWPID,
+ NEWNS,
+ NEWUTS,
+ NEWIPC,
+ NEWUSER,
+ }
+}
+
+// Namespace defines the configuration for each namespace. It specifies an
+// alternate path that can be joined via setns.
+type Namespace struct {
+ Type NamespaceType `json:"type"`
+ Path string `json:"path"`
+}
+
+func (n *Namespace) GetPath(pid int) string {
+ if n.Path != "" {
+ return n.Path
+ }
+ return fmt.Sprintf("/proc/%d/ns/%s", pid, nsToFile(n.Type))
+}
+
+func (n *Namespaces) Remove(t NamespaceType) bool {
+ i := n.index(t)
+ if i == -1 {
+ return false
+ }
+ *n = append((*n)[:i], (*n)[i+1:]...)
+ return true
+}
+
+func (n *Namespaces) Add(t NamespaceType, path string) {
+ i := n.index(t)
+ if i == -1 {
+ *n = append(*n, Namespace{Type: t, Path: path})
+ return
+ }
+ (*n)[i].Path = path
+}
+
+func (n *Namespaces) index(t NamespaceType) int {
+ for i, ns := range *n {
+ if ns.Type == t {
+ return i
+ }
+ }
+ return -1
+}
+
+func (n *Namespaces) Contains(t NamespaceType) bool {
+ return n.index(t) != -1
+}
+
+func (n *Namespaces) PathOf(t NamespaceType) string {
+ i := n.index(t)
+ if i == -1 {
+ return ""
+ }
+ return (*n)[i].Path
+}
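
Editor's note: a sketch of the Namespaces helpers above (Linux/FreeBSD builds only, per the build tag); the pid and path are hypothetical.

    package main

    import (
        "fmt"

        "github.com/opencontainers/runc/libcontainer/configs"
    )

    func main() {
        var ns configs.Namespaces
        ns.Add(configs.NEWNET, "")                 // create a new network namespace
        ns.Add(configs.NEWNS, "/proc/1234/ns/mnt") // join an existing mount namespace

        fmt.Println(ns.Contains(configs.NEWNET)) // true
        fmt.Println(ns.PathOf(configs.NEWNS))    // /proc/1234/ns/mnt
        fmt.Println(ns[0].GetPath(1234))         // /proc/1234/ns/net
    }
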
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go
new file mode 100644
index 0000000..9a74033
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!freebsd
+
+package configs
+
+// Namespace defines the configuration for each namespace. It specifies an
+// alternate path that can be joined via setns.
+type Namespace struct {
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go
new file mode 100644
index 0000000..ccdb228
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go
@@ -0,0 +1,72 @@
+package configs
+
+// Network defines configuration for a container's networking stack
+//
+// The network configuration can be omitted from a container, causing the
+// container to be set up with the host's networking stack
+type Network struct {
+ // Type sets the network's type, commonly veth or loopback
+ Type string `json:"type"`
+
+ // Name of the network interface
+ Name string `json:"name"`
+
+ // The bridge to use.
+ Bridge string `json:"bridge"`
+
+ // MacAddress contains the MAC address to set on the network interface
+ MacAddress string `json:"mac_address"`
+
+ // Address contains the IPv4 and mask to set on the network interface
+ Address string `json:"address"`
+
+ // Gateway sets the gateway address that is used as the default for the interface
+ Gateway string `json:"gateway"`
+
+ // IPv6Address contains the IPv6 and mask to set on the network interface
+ IPv6Address string `json:"ipv6_address"`
+
+ // IPv6Gateway sets the ipv6 gateway address that is used as the default for the interface
+ IPv6Gateway string `json:"ipv6_gateway"`
+
+ // Mtu sets the mtu value for the interface and will be mirrored on both the host and
+ // container's interfaces if a pair is created, specifically in the case of type veth
+ // Note: This does not apply to loopback interfaces.
+ Mtu int `json:"mtu"`
+
+ // TxQueueLen sets the tx_queuelen value for the interface and will be mirrored on both the host and
+ // container's interfaces if a pair is created, specifically in the case of type veth
+ // Note: This does not apply to loopback interfaces.
+ TxQueueLen int `json:"txqueuelen"`
+
+ // HostInterfaceName is the unique name of the host-side veth interface created for the
+ // container.
+ HostInterfaceName string `json:"host_interface_name"`
+
+ // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
+ // bridge port in the case of type veth
+ // Note: This is unsupported on some systems.
+ // Note: This does not apply to loopback interfaces.
+ HairpinMode bool `json:"hairpin_mode"`
+}
+
+// Routes can be specified to create entries in the route table as the container is started
+//
+// All of destination, source, and gateway should be either IPv4 or IPv6.
+// One of the three options must be present, and omitted entries will use their
+// IP family default for the route table. For IPv4, for example, setting the
+// gateway to 1.2.3.4 and the interface to eth0 will set up a standard
+// destination of 0.0.0.0 (or *) when viewed in the route table.
+type Route struct {
+ // Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6
+ Destination string `json:"destination"`
+
+ // Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6
+ Source string `json:"source"`
+
+ // Sets the gateway. Accepts IPv4 and IPv6
+ Gateway string `json:"gateway"`
+
+ // The device to set this route up for, for example: eth0
+ InterfaceName string `json:"interface_name"`
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go
new file mode 100644
index 0000000..1afc52b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go
@@ -0,0 +1,143 @@
+// +build linux
+
+package system
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "os/exec"
+ "syscall"
+ "unsafe"
+)
+
+// If arg2 is nonzero, set the "child subreaper" attribute of the
+// calling process; if arg2 is zero, unset the attribute. When a
+// process is marked as a child subreaper, all of the children
+// that it creates, and their descendants, will be marked as
+// having a subreaper. In effect, a subreaper fulfills the role
+// of init(1) for its descendant processes. Upon termination of
+// a process that is orphaned (i.e., its immediate parent has
+// already terminated) and marked as having a subreaper, the
+// nearest still living ancestor subreaper will receive a SIGCHLD
+// signal and be able to wait(2) on the process to discover its
+// termination status.
+const PR_SET_CHILD_SUBREAPER = 36
+
+type ParentDeathSignal int
+
+func (p ParentDeathSignal) Restore() error {
+ if p == 0 {
+ return nil
+ }
+ current, err := GetParentDeathSignal()
+ if err != nil {
+ return err
+ }
+ if p == current {
+ return nil
+ }
+ return p.Set()
+}
+
+func (p ParentDeathSignal) Set() error {
+ return SetParentDeathSignal(uintptr(p))
+}
+
+func Execv(cmd string, args []string, env []string) error {
+ name, err := exec.LookPath(cmd)
+ if err != nil {
+ return err
+ }
+
+ return syscall.Exec(name, args, env)
+}
+
+func Prlimit(pid, resource int, limit syscall.Rlimit) error {
+ _, _, err := syscall.RawSyscall6(syscall.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
+
+func SetParentDeathSignal(sig uintptr) error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 {
+ return err
+ }
+ return nil
+}
+
+func GetParentDeathSignal() (ParentDeathSignal, error) {
+ var sig int
+ _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0)
+ if err != 0 {
+ return -1, err
+ }
+ return ParentDeathSignal(sig), nil
+}
+
+func SetKeepCaps() error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 {
+ return err
+ }
+
+ return nil
+}
+
+func ClearKeepCaps() error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 {
+ return err
+ }
+
+ return nil
+}
+
+func Setctty() error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 {
+ return err
+ }
+ return nil
+}
+
+// RunningInUserNS detects whether we are currently running in a user namespace.
+// Copied from github.com/lxc/lxd/shared/util.go
+func RunningInUserNS() bool {
+ file, err := os.Open("/proc/self/uid_map")
+ if err != nil {
+ // This kernel-provided file only exists if user namespaces are supported
+ return false
+ }
+ defer file.Close()
+
+ buf := bufio.NewReader(file)
+ l, _, err := buf.ReadLine()
+ if err != nil {
+ return false
+ }
+
+ line := string(l)
+ var a, b, c int64
+ fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
+ /*
+ * We assume we are in the initial user namespace if we have a full
+ * range - 4294967295 uids starting at uid 0.
+ */
+ if a == 0 && b == 0 && c == 4294967295 {
+ return false
+ }
+ return true
+}
+
+// SetSubreaper sets the value i as the subreaper setting for the calling process
+func SetSubreaper(i int) error {
+ return Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0)
+}
+
+func Prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) {
+ _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go
new file mode 100644
index 0000000..37808a2
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go
@@ -0,0 +1,27 @@
+package system
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+// GetProcessStartTime looks in /proc to find the process start time so that
+// we can verify that this pid started after we did.
+func GetProcessStartTime(pid int) (string, error) {
+ data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat"))
+ if err != nil {
+ return "", err
+ }
+
+ parts := strings.Split(string(data), " ")
+ // the starttime is located at pos 22
+ // from the man page
+ //
+ // starttime %llu (was %lu before Linux 2.6)
+ // (22) The time the process started after system boot. In kernels before Linux 2.6, this
+ // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks
+ // (divide by sysconf(_SC_CLK_TCK)).
+ return parts[22-1], nil // starts at 1
+}
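
Editor's note: a sketch of GetProcessStartTime on a system with /proc; the value is in clock ticks since boot, so dividing by sysconf(_SC_CLK_TCK) yields seconds.

    package main

    import (
        "fmt"
        "os"

        "github.com/opencontainers/runc/libcontainer/system"
    )

    func main() {
        // Field 22 of /proc/<pid>/stat: process start time in clock ticks after boot.
        start, err := system.GetProcessStartTime(os.Getpid())
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        fmt.Println("start time (clock ticks after boot):", start)
    }
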
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go
new file mode 100644
index 0000000..615ff4c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go
@@ -0,0 +1,40 @@
+package system
+
+import (
+ "fmt"
+ "runtime"
+ "syscall"
+)
+
+// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092
+//
+// We need different setns values for the different platforms and architectures.
+// We declare the syscall numbers here because the SETNS syscall does not exist in the stdlib.
+var setNsMap = map[string]uintptr{
+ "linux/386": 346,
+ "linux/arm64": 268,
+ "linux/amd64": 308,
+ "linux/arm": 375,
+ "linux/ppc": 350,
+ "linux/ppc64": 350,
+ "linux/ppc64le": 350,
+ "linux/s390x": 339,
+}
+
+var sysSetns = setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
+
+func SysSetns() uint32 {
+ return uint32(sysSetns)
+}
+
+func Setns(fd uintptr, flags uintptr) error {
+ ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
+ if !exists {
+ return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
+ }
+ _, _, err := syscall.RawSyscall(ns, fd, flags, 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go
new file mode 100644
index 0000000..c990065
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go
@@ -0,0 +1,25 @@
+// +build linux,386
+
+package system
+
+import (
+ "syscall"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
new file mode 100644
index 0000000..0816bf8
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
@@ -0,0 +1,25 @@
+// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x
+
+package system
+
+import (
+ "syscall"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go
new file mode 100644
index 0000000..3f780f3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go
@@ -0,0 +1,25 @@
+// +build linux,arm
+
+package system
+
+import (
+ "syscall"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID32, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go
new file mode 100644
index 0000000..b3a07cb
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go
@@ -0,0 +1,12 @@
+// +build cgo,linux cgo,freebsd
+
+package system
+
+/*
+#include <unistd.h>
+*/
+import "C"
+
+func GetClockTicks() int {
+ return int(C.sysconf(C._SC_CLK_TCK))
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go
new file mode 100644
index 0000000..d93b5d5
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go
@@ -0,0 +1,15 @@
+// +build !cgo windows
+
+package system
+
+func GetClockTicks() int {
+ // TODO figure out a better alternative for platforms where we're missing cgo
+ //
+ // TODO Windows. This could be implemented using Win32 QueryPerformanceFrequency().
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms644905(v=vs.85).aspx
+ //
+ // An example of its usage can be found here.
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx
+
+ return 100
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go
new file mode 100644
index 0000000..e7cfd62
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go
@@ -0,0 +1,9 @@
+// +build !linux
+
+package system
+
+// RunningInUserNS is a stub for non-Linux systems
+// Always returns false
+func RunningInUserNS() bool {
+ return false
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
new file mode 100644
index 0000000..30f74df
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
@@ -0,0 +1,99 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var _zero uintptr
+
+// Llistxattr returns the size of the xattr list for path and a nil error.
+// It takes an allocated []byte, or nil, as the destination argument.
+func Llistxattr(path string, dest []byte) (size int, err error) {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return -1, err
+ }
+ var newpathBytes unsafe.Pointer
+ if len(dest) > 0 {
+ newpathBytes = unsafe.Pointer(&dest[0])
+ } else {
+ newpathBytes = unsafe.Pointer(&_zero)
+ }
+
+ _size, _, errno := syscall.Syscall6(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(newpathBytes), uintptr(len(dest)), 0, 0, 0)
+ size = int(_size)
+ if errno != 0 {
+ return -1, errno
+ }
+
+ return size, nil
+}
+
+// Lgetxattr returns a []byte slice containing the value of the xattr if it is set,
+// and nil otherwise. It requires the path and the attribute name as arguments.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ var sz int
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return nil, err
+ }
+
+ // Start with a 128 length byte array
+ sz = 128
+ dest := make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ _sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+
+ switch {
+ case errno == syscall.ENODATA:
+ return nil, errno
+ case errno == syscall.ENOTSUP:
+ return nil, errno
+ case errno == syscall.ERANGE:
+ // The 128-byte buffer might not be large enough;
+ // a nil destination buffer is passed to query the real size
+ // of the xattr on disk.
+ _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0)
+ sz = int(_sz)
+ if sz < 0 {
+ return nil, errno
+ }
+ dest = make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ if errno != 0 {
+ return nil, errno
+ }
+ case errno != 0:
+ return nil, errno
+ }
+ sz = int(_sz)
+ return dest[:sz], nil
+}
+
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return err
+ }
+ var dataBytes unsafe.Pointer
+ if len(data) > 0 {
+ dataBytes = unsafe.Pointer(&data[0])
+ } else {
+ dataBytes = unsafe.Pointer(&_zero)
+ }
+ _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
new file mode 100644
index 0000000..3466bfc
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
@@ -0,0 +1,121 @@
+package utils
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "encoding/json"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+)
+
+const (
+ exitSignalOffset = 128
+)
+
+// GenerateRandomName returns a new name joined with a prefix. The size
+// specified is used to truncate the randomly generated value.
+func GenerateRandomName(prefix string, size int) (string, error) {
+ id := make([]byte, 32)
+ if _, err := io.ReadFull(rand.Reader, id); err != nil {
+ return "", err
+ }
+ if size > 64 {
+ size = 64
+ }
+ return prefix + hex.EncodeToString(id)[:size], nil
+}
+
+// ResolveRootfs ensures that the current working directory is
+// not a symlink and returns the absolute path to the rootfs
+func ResolveRootfs(uncleanRootfs string) (string, error) {
+ rootfs, err := filepath.Abs(uncleanRootfs)
+ if err != nil {
+ return "", err
+ }
+ return filepath.EvalSymlinks(rootfs)
+}
+
+// ExitStatus returns the correct exit status for a process based on if it
+// was signaled or exited cleanly
+func ExitStatus(status syscall.WaitStatus) int {
+ if status.Signaled() {
+ return exitSignalOffset + int(status.Signal())
+ }
+ return status.ExitStatus()
+}
+
+// WriteJSON writes the provided struct v to w using standard json marshaling
+func WriteJSON(w io.Writer, v interface{}) error {
+ data, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ return err
+}
+
+// CleanPath makes a path safe for use with filepath.Join. This is done by not
+// only cleaning the path, but also (if the path is relative) adding a leading
+// '/' and cleaning it (then removing the leading '/'). This ensures that a
+// path resulting from prepending another path will always lexically resolve to
+// a subdirectory of the prefixed path. This is all done lexically, so paths
+// that include symlinks won't be safe as a result of using CleanPath.
+func CleanPath(path string) string {
+ // Deal with empty strings nicely.
+ if path == "" {
+ return ""
+ }
+
+ // Ensure that all paths are cleaned (especially problematic ones like
+ // "/../../../../../" which can cause lots of issues).
+ path = filepath.Clean(path)
+
+ // If the path isn't absolute, we need to do more processing to fix paths
+ // such as "../../../../<etc>/some/path". We also shouldn't convert absolute
+ // paths to relative ones.
+ if !filepath.IsAbs(path) {
+ path = filepath.Clean(string(os.PathSeparator) + path)
+ // This can't fail, as (by definition) all paths are relative to root.
+ path, _ = filepath.Rel(string(os.PathSeparator), path)
+ }
+
+ // Clean the path again for good measure.
+ return filepath.Clean(path)
+}
+
+// SearchLabels searches a list of key-value pairs for the provided key and
+// returns the corresponding value. Keys and values must be separated by '='.
+func SearchLabels(labels []string, query string) string {
+ for _, l := range labels {
+ parts := strings.SplitN(l, "=", 2)
+ if len(parts) < 2 {
+ continue
+ }
+ if parts[0] == query {
+ return parts[1]
+ }
+ }
+ return ""
+}
+
+// Annotations returns the bundle path and user-defined annotations from the
+// libcontainer state. We need to remove the bundle because that is a label
+// added by libcontainer.
+func Annotations(labels []string) (bundle string, userAnnotations map[string]string) {
+ userAnnotations = make(map[string]string)
+ for _, l := range labels {
+ parts := strings.SplitN(l, "=", 2)
+ if len(parts) < 2 {
+ continue
+ }
+ if parts[0] == "bundle" {
+ bundle = parts[1]
+ } else {
+ userAnnotations[parts[0]] = parts[1]
+ }
+ }
+ return
+}
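
Editor's note: a sketch of CleanPath and Annotations from the utils package above; the label values are made up, and the expected outputs follow from the code as vendored.

    package main

    import (
        "fmt"

        "github.com/opencontainers/runc/libcontainer/utils"
    )

    func main() {
        // CleanPath confines relative paths lexically, so prefixing cannot escape.
        fmt.Println(utils.CleanPath("../../etc/passwd")) // etc/passwd
        fmt.Println(utils.CleanPath("/a/../../b"))       // /b

        labels := []string{"bundle=/run/bundle", "owner=alice"}
        bundle, annotations := utils.Annotations(labels)
        fmt.Println(bundle)      // /run/bundle
        fmt.Println(annotations) // map[owner:alice]
    }
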
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
new file mode 100644
index 0000000..408918f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
@@ -0,0 +1,33 @@
+// +build !windows
+
+package utils
+
+import (
+ "io/ioutil"
+ "strconv"
+ "syscall"
+)
+
+func CloseExecFrom(minFd int) error {
+ fdList, err := ioutil.ReadDir("/proc/self/fd")
+ if err != nil {
+ return err
+ }
+ for _, fi := range fdList {
+ fd, err := strconv.Atoi(fi.Name())
+ if err != nil {
+ // ignore non-numeric file names
+ continue
+ }
+
+ if fd < minFd {
+ // ignore descriptors lower than our specified minimum
+ continue
+ }
+
+ // intentionally ignore errors from syscall.CloseOnExec
+ syscall.CloseOnExec(fd)
+ // the cases where this might fail are basically file descriptors that have already been closed (including and especially the one that was created when ioutil.ReadDir did the "opendir" syscall)
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/CONTRIBUTORS b/src/kube2msb/vendor/github.com/pborman/uuid/CONTRIBUTORS
new file mode 100644
index 0000000..b382a04
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/CONTRIBUTORS
@@ -0,0 +1 @@
+Paul Borman <borman@google.com>
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/LICENSE b/src/kube2msb/vendor/github.com/pborman/uuid/LICENSE
new file mode 100644
index 0000000..5dc6826
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/dce.go b/src/kube2msb/vendor/github.com/pborman/uuid/dce.go
new file mode 100644
index 0000000..50a0f2d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/dce.go
@@ -0,0 +1,84 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+ Person = Domain(0)
+ Group = Domain(1)
+ Org = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site-defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) UUID {
+ uuid := NewUUID()
+ if uuid != nil {
+ uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+ uuid[9] = byte(domain)
+ binary.BigEndian.PutUint32(uuid[0:], id)
+ }
+ return uuid
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+// NewDCEPerson(Person, uint32(os.Getuid()))
+func NewDCEPerson() UUID {
+ return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+// NewDCEGroup(Group, uint32(os.Getgid()))
+func NewDCEGroup() UUID {
+ return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID; the bool is false for other versions.
+func (uuid UUID) Domain() (Domain, bool) {
+ if v, _ := uuid.Version(); v != 2 {
+ return 0, false
+ }
+ return Domain(uuid[9]), true
+}
+
+// Id returns the id for a Version 2 UUID; the bool is false for other versions.
+func (uuid UUID) Id() (uint32, bool) {
+ if v, _ := uuid.Version(); v != 2 {
+ return 0, false
+ }
+ return binary.BigEndian.Uint32(uuid[0:4]), true
+}
+
+func (d Domain) String() string {
+ switch d {
+ case Person:
+ return "Person"
+ case Group:
+ return "Group"
+ case Org:
+ return "Org"
+ }
+ return fmt.Sprintf("Domain%d", int(d))
+}
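
Editor's note: a sketch of the DCE Security constructors above; NewDCEPerson embeds the calling user's UID, so the printed id depends on who runs it.

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        u := uuid.NewDCEPerson() // Version 2, Person domain, id = os.Getuid()
        if u == nil {
            fmt.Println("could not generate a DCE UUID")
            return
        }
        d, _ := u.Domain()
        id, _ := u.Id()
        fmt.Println(d, id) // Person <uid>
    }
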
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/doc.go b/src/kube2msb/vendor/github.com/pborman/uuid/doc.go
new file mode 100644
index 0000000..d8bd013
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The uuid package generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services.
+package uuid
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/hash.go b/src/kube2msb/vendor/github.com/pborman/uuid/hash.go
new file mode 100644
index 0000000..cdd4192
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "hash"
+)
+
+// Well known Name Space IDs and UUIDs
+var (
+ NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+ NIL = Parse("00000000-0000-0000-0000-000000000000")
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data, as generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+ h.Reset()
+ h.Write(space)
+ h.Write([]byte(data))
+ s := h.Sum(nil)
+ uuid := make([]byte, 16)
+ copy(uuid, s)
+ uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+ return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data.
+//
+// NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+ return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data.
+//
+// NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+ return NewHash(sha1.New(), space, data, 5)
+}
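
Editor's note: a sketch of the name-based constructors above; the same namespace and name always produce the same UUID, which is the point of Version 3/5 UUIDs.

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        u1 := uuid.NewSHA1(uuid.NameSpace_DNS, []byte("example.com"))
        u2 := uuid.NewSHA1(uuid.NameSpace_DNS, []byte("example.com"))
        fmt.Println(u1.String())        // deterministic Version 5 UUID
        fmt.Println(uuid.Equal(u1, u2)) // true

        u3 := uuid.NewMD5(uuid.NameSpace_URL, []byte("https://example.com"))
        fmt.Println(u3.String()) // deterministic Version 3 UUID
    }
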
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/json.go b/src/kube2msb/vendor/github.com/pborman/uuid/json.go
new file mode 100644
index 0000000..760580a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/json.go
@@ -0,0 +1,30 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "errors"
+
+func (u UUID) MarshalJSON() ([]byte, error) {
+ if len(u) == 0 {
+ return []byte(`""`), nil
+ }
+ return []byte(`"` + u.String() + `"`), nil
+}
+
+func (u *UUID) UnmarshalJSON(data []byte) error {
+ if len(data) == 0 || string(data) == `""` {
+ return nil
+ }
+ if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
+ return errors.New("invalid UUID format")
+ }
+ data = data[1 : len(data)-1]
+ uu := Parse(string(data))
+ if uu == nil {
+ return errors.New("invalid UUID format")
+ }
+ *u = uu
+ return nil
+}
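
Editor's note: a sketch of the JSON marshaling above, round-tripping a UUID through encoding/json; the record type is hypothetical.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/pborman/uuid"
    )

    type record struct {
        ID uuid.UUID `json:"id"`
    }

    func main() {
        in := record{ID: uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")}
        data, _ := json.Marshal(in)
        fmt.Println(string(data)) // {"id":"6ba7b810-9dad-11d1-80b4-00c04fd430c8"}

        var out record
        _ = json.Unmarshal(data, &out)
        fmt.Println(uuid.Equal(in.ID, out.ID)) // true
    }
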
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/node.go b/src/kube2msb/vendor/github.com/pborman/uuid/node.go
new file mode 100644
index 0000000..dd0a8ac
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/node.go
@@ -0,0 +1,101 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "net"
+
+var (
+ interfaces []net.Interface // cached list of interfaces
+ ifname string // name of interface being used
+ nodeID []byte // hardware for version 1 UUIDs
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived. The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+ return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated. If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+ if interfaces == nil {
+ var err error
+ interfaces, err = net.Interfaces()
+ if err != nil && name != "" {
+ return false
+ }
+ }
+
+ for _, ifs := range interfaces {
+ if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+ if setNodeID(ifs.HardwareAddr) {
+ ifname = ifs.Name
+ return true
+ }
+ }
+ }
+
+ // We found no interfaces with a valid hardware address. If name
+ // does not specify a specific interface generate a random Node ID
+ // (section 4.1.6)
+ if name == "" {
+ if nodeID == nil {
+ nodeID = make([]byte, 6)
+ }
+ randomBits(nodeID)
+ return true
+ }
+ return false
+}
+
+// NodeID returns a copy of the current Node ID, setting the Node ID
+// if it is not already set.
+func NodeID() []byte {
+ if nodeID == nil {
+ SetNodeInterface("")
+ }
+ nid := make([]byte, 6)
+ copy(nid, nodeID)
+ return nid
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
+// of id are used. If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+ if setNodeID(id) {
+ ifname = "user"
+ return true
+ }
+ return false
+}
+
+func setNodeID(id []byte) bool {
+ if len(id) < 6 {
+ return false
+ }
+ if nodeID == nil {
+ nodeID = make([]byte, 6)
+ }
+ copy(nodeID, id)
+ return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
+// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+ if len(uuid) != 16 {
+ return nil
+ }
+ node := make([]byte, 6)
+ copy(node, uuid[10:])
+ return node
+}
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/time.go b/src/kube2msb/vendor/github.com/pborman/uuid/time.go
new file mode 100644
index 0000000..7ebc9be
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/time.go
@@ -0,0 +1,132 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "sync"
+ "time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+ lillian = 2299160 // Julian day of 15 Oct 1582
+ unix = 2440587 // Julian day of 1 Jan 1970
+ epoch = unix - lillian // Days between epochs
+ g1582 = epoch * 86400 // seconds between epochs
+ g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+ mu sync.Mutex
+ lasttime uint64 // last time we returned
+ clock_seq uint16 // clock sequence for this run
+
+ timeNow = time.Now // for testing
+)
+
+// UnixTime converts t into the number of seconds and nanoseconds since the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+ sec = int64(t - g1582ns100)
+ nsec = (sec % 10000000) * 100
+ sec /= 10000000
+ return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+ defer mu.Unlock()
+ mu.Lock()
+ return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+ t := timeNow()
+
+ // If we don't have a clock sequence already, set one.
+ if clock_seq == 0 {
+ setClockSequence(-1)
+ }
+ now := uint64(t.UnixNano()/100) + g1582ns100
+
+ // If time has gone backwards with this clock sequence then we
+ // increment the clock sequence
+ if now <= lasttime {
+ clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
+ }
+ lasttime = now
+ return Time(now), clock_seq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
+//
+func ClockSequence() int {
+ defer mu.Unlock()
+ mu.Lock()
+ return clockSequence()
+}
+
+func clockSequence() int {
+ if clock_seq == 0 {
+ setClockSequence(-1)
+ }
+ return int(clock_seq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting
+// seq to -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+ defer mu.Unlock()
+ mu.Lock()
+ setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+ if seq == -1 {
+ var b [2]byte
+ randomBits(b[:]) // clock sequence
+ seq = int(b[0])<<8 | int(b[1])
+ }
+ old_seq := clock_seq
+ clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+ if old_seq != clock_seq {
+ lasttime = 0
+ }
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. It returns false if uuid is not valid. The time is only well defined
+// for version 1 and 2 UUIDs.
+func (uuid UUID) Time() (Time, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ return Time(time), true
+}
+
+// ClockSequence returns the clock sequence encoded in uuid. It returns false
+// if uuid is not valid. The clock sequence is only well defined for version 1
+// and 2 UUIDs.
+func (uuid UUID) ClockSequence() (int, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
+}
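
Editor's note: a sketch of the timestamp helpers above applied to a Version 1 UUID (NewUUID is defined in version1.go later in this diff); output depends on the clock when it is run.

    package main

    import (
        "fmt"
        "time"

        "github.com/pborman/uuid"
    )

    func main() {
        u := uuid.NewUUID() // Version 1: embeds a timestamp and clock sequence
        if u == nil {
            fmt.Println("could not generate a Version 1 UUID")
            return
        }
        t, _ := u.Time()          // 100ns intervals since 15 Oct 1582
        sec, nsec := t.UnixTime() // converted to the Unix epoch
        fmt.Println(time.Unix(sec, nsec).UTC())

        seq, _ := u.ClockSequence()
        fmt.Println("clock sequence:", seq)
    }
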
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/util.go b/src/kube2msb/vendor/github.com/pborman/uuid/util.go
new file mode 100644
index 0000000..de40b10
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+ if _, err := io.ReadFull(rander, b); err != nil {
+ panic(err.Error()) // rand should never fail
+ }
+}
+
+// xvalues maps a byte to its value as a hexadecimal digit, or to 255.
+var xvalues = []byte{
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts the first two hex characters of x into a byte.
+func xtob(x string) (byte, bool) {
+ b1 := xvalues[x[0]]
+ b2 := xvalues[x[1]]
+ return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/uuid.go b/src/kube2msb/vendor/github.com/pborman/uuid/uuid.go
new file mode 100644
index 0000000..2920fae
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/uuid.go
@@ -0,0 +1,163 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID []byte
+
+// A Version represents a UUIDs version.
+type Version byte
+
+// A Variant represents a UUIDs variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+ Invalid = Variant(iota) // Invalid UUID
+ RFC4122 // The variant specified in RFC4122
+ Reserved // Reserved, NCS backward compatibility.
+ Microsoft // Reserved, Microsoft Corporation backward compatibility.
+ Future // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// New returns a new random (version 4) UUID as a string. It is a convenience
+// function for NewRandom().String().
+func New() string {
+ return NewRandom().String()
+}
+
+// Parse decodes s into a UUID or returns nil. Both the UUID form of
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
+func Parse(s string) UUID {
+ if len(s) == 36+9 {
+ if strings.ToLower(s[:9]) != "urn:uuid:" {
+ return nil
+ }
+ s = s[9:]
+ } else if len(s) != 36 {
+ return nil
+ }
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return nil
+ }
+ uuid := make([]byte, 16)
+ for i, x := range []int{
+ 0, 2, 4, 6,
+ 9, 11,
+ 14, 16,
+ 19, 21,
+ 24, 26, 28, 30, 32, 34} {
+ if v, ok := xtob(s[x:]); !ok {
+ return nil
+ } else {
+ uuid[i] = v
+ }
+ }
+ return uuid
+}
+
+// Equal returns true if uuid1 and uuid2 are equal.
+func Equal(uuid1, uuid2 UUID) bool {
+ return bytes.Equal(uuid1, uuid2)
+}
+
+// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// , or "" if uuid is invalid.
+func (uuid UUID) String() string {
+ if uuid == nil || len(uuid) != 16 {
+ return ""
+ }
+ b := []byte(uuid)
+ return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
+ b[:4], b[4:6], b[6:8], b[8:10], b[10:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+ if uuid == nil || len(uuid) != 16 {
+ return ""
+ }
+ b := []byte(uuid)
+ return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x",
+ b[:4], b[4:6], b[6:8], b[8:10], b[10:])
+}
+
+// Variant returns the variant encoded in uuid. It returns Invalid if
+// uuid is invalid.
+func (uuid UUID) Variant() Variant {
+ if len(uuid) != 16 {
+ return Invalid
+ }
+ switch {
+ case (uuid[8] & 0xc0) == 0x80:
+ return RFC4122
+ case (uuid[8] & 0xe0) == 0xc0:
+ return Microsoft
+ case (uuid[8] & 0xe0) == 0xe0:
+ return Future
+ default:
+ return Reserved
+ }
+ panic("unreachable")
+}
+
+// Version returns the version of uuid. It returns false if uuid is not
+// valid.
+func (uuid UUID) Version() (Version, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ return Version(uuid[6] >> 4), true
+}
+
+func (v Version) String() string {
+ if v > 15 {
+ return fmt.Sprintf("BAD_VERSION_%d", v)
+ }
+ return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+ switch v {
+ case RFC4122:
+ return "RFC4122"
+ case Reserved:
+ return "Reserved"
+ case Microsoft:
+ return "Microsoft"
+ case Future:
+ return "Future"
+ case Invalid:
+ return "Invalid"
+ }
+ return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+ if r == nil {
+ rander = rand.Reader
+ return
+ }
+ rander = r
+}
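
Editor's note: a sketch of Parse and the accessors above, using the well-known DNS namespace UUID from hash.go as input; both the plain and urn:uuid: forms are accepted.

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        u := uuid.Parse("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8")
        if u == nil {
            fmt.Println("invalid UUID")
            return
        }
        fmt.Println(u.String())  // 6ba7b810-9dad-11d1-80b4-00c04fd430c8
        fmt.Println(u.URN())     // urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8
        fmt.Println(u.Variant()) // RFC4122

        v, _ := u.Version()
        fmt.Println(v) // VERSION_1
    }
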
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/version1.go b/src/kube2msb/vendor/github.com/pborman/uuid/version1.go
new file mode 100644
index 0000000..0127eac
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/version1.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil.
+func NewUUID() UUID {
+ if nodeID == nil {
+ SetNodeInterface("")
+ }
+
+ now, seq, err := GetTime()
+ if err != nil {
+ return nil
+ }
+
+ uuid := make([]byte, 16)
+
+ time_low := uint32(now & 0xffffffff)
+ time_mid := uint16((now >> 32) & 0xffff)
+ time_hi := uint16((now >> 48) & 0x0fff)
+ time_hi |= 0x1000 // Version 1
+
+ binary.BigEndian.PutUint32(uuid[0:], time_low)
+ binary.BigEndian.PutUint16(uuid[4:], time_mid)
+ binary.BigEndian.PutUint16(uuid[6:], time_hi)
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+ copy(uuid[10:], nodeID)
+
+ return uuid
+}
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/version4.go b/src/kube2msb/vendor/github.com/pborman/uuid/version4.go
new file mode 100644
index 0000000..b3d4a36
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/version4.go
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+// NewRandom returns a Random (Version 4) UUID, or panics if random data cannot be read.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion, which
+// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() UUID {
+ uuid := make([]byte, 16)
+ randomBits([]byte(uuid))
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid
+}
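
Editor's note: a sketch of the random constructors; NewRandom returns a Version 4 UUID, and New is the string convenience wrapper defined in uuid.go earlier in this diff.

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        u := uuid.NewRandom()
        fmt.Println(u.String()) // random UUID with the version nibble set to 4

        v, _ := u.Version()
        fmt.Println(v) // VERSION_4

        fmt.Println(uuid.New()) // shorthand for NewRandom().String()
    }
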
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/LICENSE b/src/kube2msb/vendor/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/NOTICE b/src/kube2msb/vendor/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 0000000..37e4a7d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,28 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+goautoneg
+http://bitbucket.org/ww/goautoneg
+Copyright 2011, Open Knowledge Foundation Ltd.
+See README.txt for license details.
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
+
+Support for streaming Protocol Buffer messages for the Go language (golang).
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/README.md b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 0000000..81032be
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1,53 @@
+# Overview
+This is the [Prometheus](http://www.prometheus.io) telemetry
+instrumentation client library for [Go](http://golang.org). It
+enables authors to define process-space metrics for their servers and
+expose them through a web service interface for extraction,
+aggregation, and a whole slew of other post-processing techniques.
+
+# Installing
+ $ go get github.com/prometheus/client_golang/prometheus
+
+# Example
+```go
+package main
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ indexed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "my_company",
+ Subsystem: "indexer",
+ Name: "documents_indexed",
+ Help: "The number of documents indexed.",
+ })
+ size = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "my_company",
+ Subsystem: "storage",
+ Name: "documents_total_size_bytes",
+ Help: "The total size of all documents in the storage.",
+ })
+)
+
+func main() {
+ http.Handle("/metrics", prometheus.Handler())
+
+ indexed.Inc()
+ size.Set(5)
+
+ http.ListenAndServe(":8080", nil)
+}
+
+func init() {
+ prometheus.MustRegister(indexed)
+ prometheus.MustRegister(size)
+}
+```
+
+# Documentation
+
+[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang)
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 0000000..c046880
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
+//
+// The stock metrics provided by this package (like Gauge, Counter, Summary) are
+// also Collectors (which only ever collect one metric, namely itself). An
+// implementer of Collector may, however, collect multiple metrics in a
+// coordinated fashion and/or create metrics on the fly. Examples for collectors
+// already implemented in this library are the metric vectors (i.e. collection
+// of multiple instances of the same Metric but with different label values)
+// like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+ // Describe sends the super-set of all possible descriptors of metrics
+ // collected by this Collector to the provided channel and returns once
+ // the last descriptor has been sent. The sent descriptors fulfill the
+ // consistency and uniqueness requirements described in the Desc
+ // documentation. (It is valid if one and the same Collector sends
+ // duplicate descriptors. Those duplicates are simply ignored. However,
+ // two different Collectors must not send duplicate descriptors.) This
+ // method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. If a Collector encounters an error while
+ // executing this method, it must send an invalid descriptor (created
+ // with NewInvalidDesc) to signal the error to the registry.
+ Describe(chan<- *Desc)
+ // Collect is called by Prometheus when collecting metrics. The
+ // implementation sends each collected metric via the provided channel
+ // and returns once the last metric has been sent. The descriptor of
+ // each sent metric is one of those returned by Describe. Returned
+ // metrics that share the same descriptor must differ in their variable
+ // label values. This method may be called concurrently and must
+ // therefore be implemented in a concurrency safe way. Blocking occurs
+ // at the expense of total performance of rendering all registered
+ // metrics. Ideally, Collector implementations support concurrent
+ // readers.
+ Collect(chan<- Metric)
+}
+
+// SelfCollector implements Collector for a single Metric so that the
+// Metric collects itself. Add it as an anonymous field to a struct that
+// implements Metric, and call Init with the Metric itself as an argument.
+type SelfCollector struct {
+ self Metric
+}
+
+// Init provides the SelfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *SelfCollector) Init(self Metric) {
+ c.self = self
+}
+
+// Describe implements Collector.
+func (c *SelfCollector) Describe(ch chan<- *Desc) {
+ ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *SelfCollector) Collect(ch chan<- Metric) {
+ ch <- c.self
+}
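As a sketch of the interface above, a hand-written Collector that creates a throw-away metric at scrape time (the queue and its depth accessor are hypothetical; MustNewConstMetric is provided elsewhere in this package):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector reports the depth of a hypothetical work queue.
type queueCollector struct {
	depthDesc *prometheus.Desc
	depth     func() int // hypothetical accessor for the current queue depth
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.depthDesc
}

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	// Build a fresh metric on every scrape from the current queue depth.
	ch <- prometheus.MustNewConstMetric(
		c.depthDesc, prometheus.GaugeValue, float64(c.depth()))
}

func main() {
	prometheus.MustRegister(&queueCollector{
		depthDesc: prometheus.NewDesc(
			"work_queue_depth", "Current depth of the work queue.", nil, nil),
		depth: func() int { return 0 },
	})
}
```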
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 0000000..a2952d1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,175 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "hash/fnv"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+ Metric
+ Collector
+
+ // Set is used to set the Counter to an arbitrary value. It is only used
+ // if you have to transfer a value from an external counter into this
+ // Prometheus metric. Do not use it for regular handling of a
+ // Prometheus counter (as it can be used to break the contract of
+ // monotonically increasing values).
+ Set(float64)
+ // Inc increments the counter by 1.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is <
+ // 0.
+ Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+func NewCounter(opts CounterOpts) Counter {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+ result.Init(result) // Init self-collection.
+ return result
+}
+
+type counter struct {
+ value
+}
+
+func (c *counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+ c.value.Add(v)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+//
+// CounterVec embeds MetricVec. See there for a full list of methods with
+// detailed documentation.
+type CounterVec struct {
+ MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &CounterVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ result := &counter{value: value{
+ desc: desc,
+ valType: CounterValue,
+ labelPairs: makeLabelPairs(desc, lvs),
+ }}
+ result.Init(result) // Init self-collection.
+ return result
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Counter and not a
+// Metric so that no type conversion is required.
+func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Counter and not a Metric so that no
+// type conversion is required.
+func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
+ return m.MetricVec.WithLabelValues(lvs...).(Counter)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *CounterVec) With(labels Labels) Counter {
+ return m.MetricVec.With(labels).(Counter)
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+ Metric
+ Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), CounterValue, function)
+}
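A brief sketch of CounterVec in use (metric and label names are illustrative):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total HTTP requests, partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(httpReqs)

	// Fast path with ordered label values; panics on a label-count mismatch.
	httpReqs.WithLabelValues("200", "GET").Inc()

	// Equivalent, using named labels.
	httpReqs.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(3)
}
```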
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 0000000..fcde784
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,201 @@
+package prometheus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var (
+ metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+ labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+)
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each, constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabelPairs contains precalculated DTO label pairs based on
+ // the constant labels.
+ constLabelPairs []*dto.LabelPair
+ // VariableLabels contains names of labels for which the metric
+ // maintains variable values.
+ variableLabels []string
+ // id is a hash of the values of the ConstLabels and fqName. This
+ // must be unique among all registered descriptors and can therefore be
+ // used as an identifier of the descriptor.
+ id uint64
+ // dimHash is a hash of the label names (preset and variable) and the
+ // Help string. Each Desc with the same fqName must have the same
+ // dimHash.
+ dimHash uint64
+	// err is an error that occurred during construction. It is reported at
+ // registration time.
+ err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName and help must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Opts documentation for the implications of
+// constant labels.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ variableLabels: variableLabels,
+ }
+ if help == "" {
+ d.err = errors.New("empty help string")
+ return d
+ }
+ if !metricNameRE.MatchString(fqName) {
+ d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+ return d
+ }
+ // labelValues contains the label values of const labels (in order of
+ // their sorted label names) plus the fqName (at position 0).
+ labelValues := make([]string, 1, len(constLabels)+1)
+ labelValues[0] = fqName
+ labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNameSet := map[string]struct{}{}
+ // First add only the const label names and sort them...
+ for labelName := range constLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ sort.Strings(labelNames)
+ // ... so that we can now add const label values in the order of their names.
+ for _, labelName := range labelNames {
+ labelValues = append(labelValues, constLabels[labelName])
+ }
+ // Now add the variable label names, but prefix them with something that
+ // cannot be in a regular label name. That prevents matching the label
+ // dimension with a different mix between preset and variable labels.
+ for _, labelName := range variableLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, "$"+labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ if len(labelNames) != len(labelNameSet) {
+ d.err = errors.New("duplicate label names")
+ return d
+ }
+ h := fnv.New64a()
+ var b bytes.Buffer // To copy string contents into, avoiding []byte allocations.
+ for _, val := range labelValues {
+ b.Reset()
+ b.WriteString(val)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ }
+ d.id = h.Sum64()
+ // Sort labelNames so that order doesn't matter for the hash.
+ sort.Strings(labelNames)
+ // Now hash together (in this order) the help string and the sorted
+ // label names.
+ h.Reset()
+ b.Reset()
+ b.WriteString(help)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ for _, labelName := range labelNames {
+ b.Reset()
+ b.WriteString(labelName)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ }
+ d.dimHash = h.Sum64()
+
+ d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+ for n, v := range constLabels {
+ d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(v),
+ })
+ }
+ sort.Sort(LabelPairSorter(d.constLabelPairs))
+ return d
+}
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+ return &Desc{
+ err: err,
+ }
+}
+
+func (d *Desc) String() string {
+ lpStrings := make([]string, 0, len(d.constLabelPairs))
+ for _, lp := range d.constLabelPairs {
+ lpStrings = append(
+ lpStrings,
+ fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+ )
+ }
+ return fmt.Sprintf(
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ d.fqName,
+ d.help,
+ strings.Join(lpStrings, ","),
+ d.variableLabels,
+ )
+}
+
+func checkLabelName(l string) bool {
+ return labelNameRE.MatchString(l) &&
+ !strings.HasPrefix(l, reservedLabelPrefix)
+}
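A small sketch of building a Desc by hand and pairing it with a const metric (names and label values are illustrative):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	desc := prometheus.NewDesc(
		prometheus.BuildFQName("my_company", "indexer", "queue_depth"),
		"Current number of documents waiting to be indexed.",
		[]string{"shard"},                    // variable label names
		prometheus.Labels{"zone": "eu-west"}, // constant labels
	)

	// One variable label name above, so exactly one label value here.
	m := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 12, "shard-0")
	_ = m // typically sent on the channel inside a Collector's Collect method
}
```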
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 0000000..425fe87
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,109 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides embeddable metric primitives for servers and
+// standardized exposition of telemetry through a web services interface.
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// To expose metrics registered with the Prometheus registry, an HTTP server
+// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
+//
+// http.Handle("/metrics", prometheus.Handler())
+//
+// As a starting point a very basic usage example:
+//
+// package main
+//
+// import (
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// )
+//
+// var (
+// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// })
+// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// })
+// )
+//
+// func init() {
+// prometheus.MustRegister(cpuTemp)
+// prometheus.MustRegister(hdFailures)
+// }
+//
+// func main() {
+// cpuTemp.Set(65.3)
+// hdFailures.Inc()
+//
+// http.Handle("/metrics", prometheus.Handler())
+// http.ListenAndServe(":8080", nil)
+// }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter.
+// It also exports some stats about the HTTP usage of the /metrics
+// endpoint. (See the Handler function for more detail.)
+//
+// Two more advanced metric types are the Summary and Histogram.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary, and
+// Histogram, a very important part of the Prometheus data model is the
+// partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// and HistogramVec.
+//
+// Those are all the parts needed for basic usage. Detailed documentation and
+// examples are provided below.
+//
+// Everything else this package offers is essentially for "power users" only. A
+// few pointers to "power user features":
+//
+// All the various ...Opts structs have a ConstLabels field for labels that
+// never change their value (which is only useful under special circumstances,
+// see documentation of the Opts type).
+//
+// The Untyped metric behaves like a Gauge, but signals the Prometheus server
+// not to assume anything about its type.
+//
+// Functions to fine-tune how the metric registry works: EnableCollectChecks,
+// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
+//
+// For custom metric collection, there are two entry points: Custom Metric
+// implementations and custom Collector implementations. A Metric is the
+// fundamental unit in the Prometheus data model: a sample at a point in time
+// together with its meta-data (like its fully-qualified name and any number of
+// pairs of label name and label value) that knows how to marshal itself into a
+// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
+// gets registered with the Prometheus registry and manages the collection of
+// one or more Metrics. Many parts of this package are building blocks for
+// Metrics and Collectors. Desc is the metric descriptor, actually used by all
+// metrics under the hood, and by Collectors to describe the Metrics to be
+// collected, but only to be dealt with by users if they implement their own
+// Metrics or Collectors. To create a Desc, the BuildFQName function will come
+// in handy. Other useful components for Metric and Collector implementation
+// include: LabelPairSorter to sort the DTO version of label pairs,
+// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
+// collection time, MetricVec to bundle custom Metrics into a metric vector
+// Collector, SelfCollector to make a custom Metric collect itself.
+//
+// A good example for a custom Collector is the ExpVarCollector included in this
+// package, which exports variables exported via the "expvar" package as
+// Prometheus metrics.
+package prometheus
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/expvar.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/expvar.go
new file mode 100644
index 0000000..0f7630d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/expvar.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "expvar"
+)
+
+// ExpvarCollector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the ExpvarCollector is inherently
+// slow. Thus, the ExpvarCollector is probably great for experiments and
+// prototying, but you should seriously consider a more direct implementation of
+// Prometheus metrics for monitoring production systems.
+//
+// Use NewExpvarCollector to create new instances.
+type ExpvarCollector struct {
+ exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
+// to be registered with the Prometheus registry.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
+ return &ExpvarCollector{
+ exports: exports,
+ }
+}
+
+// Describe implements Collector.
+func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
+ for _, desc := range e.exports {
+ ch <- desc
+ }
+}
+
+// Collect implements Collector.
+func (e *ExpvarCollector) Collect(ch chan<- Metric) {
+ for name, desc := range e.exports {
+ var m Metric
+ expVar := expvar.Get(name)
+ if expVar == nil {
+ continue
+ }
+ var v interface{}
+ labels := make([]string, len(desc.variableLabels))
+ if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+ ch <- NewInvalidMetric(desc, err)
+ continue
+ }
+ var processValue func(v interface{}, i int)
+ processValue = func(v interface{}, i int) {
+ if i >= len(labels) {
+ copiedLabels := append(make([]string, 0, len(labels)), labels...)
+ switch v := v.(type) {
+ case float64:
+ m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+ case bool:
+ if v {
+ m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+ } else {
+ m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+ }
+ default:
+ return
+ }
+ ch <- m
+ return
+ }
+ vm, ok := v.(map[string]interface{})
+ if !ok {
+ return
+ }
+ for lv, val := range vm {
+ labels[i] = lv
+ processValue(val, i+1)
+ }
+ }
+ processValue(v, 0)
+ }
+}
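A sketch of mirroring a single expvar value as a Prometheus metric with the collector above (variable and metric names are illustrative):

```go
package main

import (
	"expvar"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A value already published via expvar.
	inFlight := expvar.NewInt("in_flight_requests")
	inFlight.Set(3)

	// Proxy it as an (untyped) Prometheus metric; no variable labels,
	// so the expvar value must be a plain number or bool.
	prometheus.MustRegister(prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"in_flight_requests": prometheus.NewDesc(
			"expvar_in_flight_requests",
			"In-flight requests, proxied from expvar.",
			nil, nil,
		),
	}))
}
```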
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 0000000..ba8a402
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,147 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "hash/fnv"
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+ Metric
+ Collector
+
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Inc increments the Gauge by 1.
+ Inc()
+ // Dec decrements the Gauge by 1.
+ Dec()
+ // Add adds the given value to the Gauge. (The value can be
+ // negative, resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be
+ // negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+func NewGauge(opts GaugeOpts) Gauge {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, 0)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+ MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &GaugeVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newValue(desc, GaugeValue, 0, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Gauge and not a
+// Metric so that no type conversion is required.
+func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Gauge and not a Metric so that no
+// type conversion is required.
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *GaugeVec) With(labels Labels) Gauge {
+ return m.MetricVec.With(labels).(Gauge)
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, function)
+}
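A sketch of GaugeFunc, which samples a value at scrape time (the metric name is illustrative):

```go
package main

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// The function is called from Write during collection, so it must be
	// safe for concurrent use; runtime.NumGoroutine is.
	goroutines := prometheus.NewGaugeFunc(
		prometheus.GaugeOpts{
			Name: "app_goroutines",
			Help: "Number of goroutines, sampled at scrape time.",
		},
		func() float64 { return float64(runtime.NumGoroutine()) },
	)
	prometheus.MustRegister(goroutines)
}
```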
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 0000000..85fa20b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,50 @@
+package prometheus
+
+import (
+ "runtime"
+ "runtime/debug"
+ "time"
+)
+
+type goCollector struct {
+ goroutines Gauge
+ gcDesc *Desc
+}
+
+// NewGoCollector returns a collector which exports metrics about the current
+// go process.
+func NewGoCollector() *goCollector {
+ return &goCollector{
+ goroutines: NewGauge(GaugeOpts{
+ Name: "go_goroutines",
+ Help: "Number of goroutines that currently exist.",
+ }),
+ gcDesc: NewDesc(
+ "go_gc_duration_seconds",
+ "A summary of the GC invocation durations.",
+ nil, nil),
+ }
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ ch <- c.goroutines.Desc()
+ ch <- c.gcDesc
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ c.goroutines.Set(float64(runtime.NumGoroutine()))
+ ch <- c.goroutines
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5)
+ debug.ReadGCStats(&stats)
+
+ quantiles := make(map[float64]float64)
+ for idx, pq := range stats.PauseQuantiles[1:] {
+ quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+ }
+ quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
+}
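Registering the collector is a one-liner; a minimal sketch wiring it into an exposition endpoint (whether a Go collector is already present in the default registry depends on the library version, so Register is used here instead of MustRegister):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Export goroutine count and GC pause quantiles for this process.
	// Register reports a duplicate registration as an error rather than panicking.
	if err := prometheus.Register(prometheus.NewGoCollector()); err != nil {
		log.Println("go collector:", err)
	}

	http.Handle("/metrics", prometheus.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```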
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 0000000..f98a41b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,450 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "hash/fnv"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable buckets. Similar to a summary, it also provides a sum of
+// observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the histogram.
+ Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+var (
+ // DefBuckets are the default Histogram buckets. The default buckets are
+ // tailored to broadly measure the response time (in seconds) of a
+ // network service. Most likely, however, you will be required to define
+ // buckets customized to your use case.
+ DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+ errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
+ )
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Histogram. Histograms with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // HistogramVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Histograms with the same fully-qualified
+	// name. In that case, those Histograms must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound, it will be added
+ // implicitly. The default value is DefBuckets.
+ Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+func NewHistogram(opts HistogramOpts) Histogram {
+ return newHistogram(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Buckets) == 0 {
+ opts.Buckets = DefBuckets
+ }
+
+ h := &histogram{
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ for i, upperBound := range h.upperBounds {
+ if i < len(h.upperBounds)-1 {
+ if upperBound >= h.upperBounds[i+1] {
+ panic(fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, h.upperBounds[i+1],
+ ))
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ h.upperBounds = h.upperBounds[:i]
+ }
+ }
+ }
+ // Finally we know the final length of h.upperBounds and can make counts.
+ h.counts = make([]uint64, len(h.upperBounds))
+
+ h.Init(h) // Init self-collection.
+ return h
+}
+
+type histogram struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+
+ SelfCollector
+ // Note that there is no mutex required.
+
+ desc *Desc
+
+ upperBounds []float64
+ counts []uint64
+
+ labelPairs []*dto.LabelPair
+}
+
+func (h *histogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+ // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+ // slightly faster than the binary search. If we really care, we could
+ // switch from one search strategy to the other depending on the number
+ // of buckets.
+ //
+ // Microbenchmarks (BenchmarkHistogramNoLabels):
+ // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+ // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+ // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ i := sort.SearchFloat64s(h.upperBounds, v)
+ if i < len(h.counts) {
+ atomic.AddUint64(&h.counts[i], 1)
+ }
+ atomic.AddUint64(&h.count, 1)
+ for {
+ oldBits := atomic.LoadUint64(&h.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, len(h.upperBounds))
+
+ his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
+ his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
+ var count uint64
+ for i, upperBound := range h.upperBounds {
+ count += atomic.LoadUint64(&h.counts[i])
+ buckets[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ }
+ }
+ his.Bucket = buckets
+ out.Histogram = his
+ out.Label = h.labelPairs
+ return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+ MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &HistogramVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newHistogram(desc, opts, lvs...)
+ },
+ },
+ }
+}
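+
+// exampleHistogramVecUsage is an illustrative sketch added by the editor; it
+// is not part of the upstream library. It shows how a HistogramVec is created
+// with two variable labels and how an observation is recorded through the
+// WithLabelValues shortcut. All names and values are invented for the example.
+func exampleHistogramVecUsage() {
+	latency := NewHistogramVec(
+		HistogramOpts{
+			Name:    "example_http_request_duration_seconds",
+			Help:    "Illustrative request latency, partitioned by status code and method.",
+			Buckets: []float64{0.05, 0.1, 0.25, 0.5, 1},
+		},
+		[]string{"code", "method"},
+	)
+	MustRegister(latency)
+	// Record a 73ms request that was answered with HTTP 200 to a GET.
+	latency.WithLabelValues("200", "get").Observe(0.073)
+}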
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Histogram and not a
+// Metric so that no type conversion is required.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Histogram and not a Metric so that no
+// type conversion is required.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
+ return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *HistogramVec) With(labels Labels) Histogram {
+ return m.MetricVec.With(labels).(Histogram)
+}
+
+type constHistogram struct {
+ desc *Desc
+ count uint64
+ sum float64
+ buckets map[float64]uint64
+ labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+ his.SampleCount = proto.Uint64(h.count)
+ his.SampleSum = proto.Float64(h.sum)
+
+ for upperBound, count := range h.buckets {
+ buckets = append(buckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+
+ if len(buckets) > 0 {
+ sort.Sort(buckSort(buckets))
+ }
+ his.Bucket = buckets
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// and sent to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
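+
+// exampleConstHistogramCollect is an illustrative sketch added by the editor;
+// it is not part of the upstream library. It shows how a custom Collector's
+// Collect method could emit a throw-away histogram built from numbers read
+// elsewhere. It assumes desc has no variable labels, and the cumulative bucket
+// counts, total count, and sum below are invented for the example.
+func exampleConstHistogramCollect(ch chan<- Metric, desc *Desc) {
+	// Cumulative count of observations per upper bound, excluding +Inf.
+	buckets := map[float64]uint64{0.5: 120, 1: 289, 2.5: 300}
+	ch <- MustNewConstHistogram(desc, 300, 157.3, buckets)
+}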
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+ return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+ return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/http.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 0000000..eabe602
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,361 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+ Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+ return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+ return time.Now()
+})
+
+func nowSeries(t ...time.Time) nower {
+ return nowFunc(func() time.Time {
+ defer func() {
+ t = t[1:]
+ }()
+
+ return t[0]
+ })
+}
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
+func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
+}
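+
+// exampleInstrumentedMux is an illustrative sketch added by the editor; it is
+// not part of the upstream library. It wraps a hypothetical ServeMux so that
+// request counts, latencies, and sizes are reported under the constant label
+// handler="search_service". The route and handler name are invented.
+func exampleInstrumentedMux() http.Handler {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/search", func(w http.ResponseWriter, r *http.Request) {
+		w.Write([]byte("ok"))
+	})
+	return InstrumentHandler("search_service", mux)
+}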
+
+// InstrumentHandlerFunc wraps the given function for instrumentation. It
+// otherwise works in the same way as InstrumentHandler.
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(
+ SummaryOpts{
+ Subsystem: "http",
+ ConstLabels: Labels{"handler": handlerName},
+ },
+ handlerFunc,
+ )
+}
+
+// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
+// flexibility (at the cost of a more complex call syntax). As
+// InstrumentHandler, this function registers four metric collectors, but it
+// uses the provided SummaryOpts to create them. However, the fields "Name" and
+// "Help" in the SummaryOpts are ignored. "Name" is replaced by
+// "requests_total", "request_duration_microseconds", "request_size_bytes", and
+// "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// help string. The names of the variable labels of the http_requests_total
+// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
+//
+// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
+// behavior of InstrumentHandler:
+//
+// prometheus.InstrumentHandlerWithOpts(
+// prometheus.SummaryOpts{
+// Subsystem: "http",
+// ConstLabels: prometheus.Labels{"handler": handlerName},
+// },
+// handler,
+// )
+//
+// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
+// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
+// and all its fields are set to the equally named fields in the provided
+// SummaryOpts.
+func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
+// more flexibility (at the cost of a more complex call syntax). See
+// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used.
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ reqCnt := NewCounterVec(
+ CounterOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: opts.ConstLabels,
+ },
+ instLabels,
+ )
+
+ opts.Name = "request_duration_microseconds"
+ opts.Help = "The HTTP request latencies in microseconds."
+ reqDur := NewSummary(opts)
+
+ opts.Name = "request_size_bytes"
+ opts.Help = "The HTTP request sizes in bytes."
+ reqSz := NewSummary(opts)
+
+ opts.Name = "response_size_bytes"
+ opts.Help = "The HTTP response sizes in bytes."
+ resSz := NewSummary(opts)
+
+ regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
+ regReqDur := MustRegisterOrGet(reqDur).(Summary)
+ regReqSz := MustRegisterOrGet(reqSz).(Summary)
+ regResSz := MustRegisterOrGet(resSz).(Summary)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+
+ delegate := &responseWriterDelegator{ResponseWriter: w}
+ out := make(chan int)
+ urlLen := 0
+ if r.URL != nil {
+ urlLen = len(r.URL.String())
+ }
+ go computeApproximateRequestSize(r, out, urlLen)
+
+ _, cn := w.(http.CloseNotifier)
+ _, fl := w.(http.Flusher)
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ var rw http.ResponseWriter
+ if cn && fl && hj && rf {
+ rw = &fancyResponseWriterDelegator{delegate}
+ } else {
+ rw = delegate
+ }
+ handlerFunc(rw, r)
+
+ elapsed := float64(time.Since(now)) / float64(time.Microsecond)
+
+ method := sanitizeMethod(r.Method)
+ code := sanitizeCode(delegate.status)
+ regReqCnt.WithLabelValues(method, code).Inc()
+ regReqDur.Observe(elapsed)
+ regResSz.Observe(float64(delegate.written))
+ regReqSz.Observe(float64(<-out))
+ })
+}
+
+func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ out <- s
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type fancyResponseWriterDelegator struct {
+ *responseWriterDelegator
+}
+
+func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+ return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (f *fancyResponseWriterDelegator) Flush() {
+ f.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return f.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
+ if !f.wroteHeader {
+ f.WriteHeader(http.StatusOK)
+ }
+ n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
+ f.written += n
+ return n, err
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 0000000..86fd81c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,166 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its meta data being exported to
+// Prometheus. Implementers of Metric in this package include Gauge, Counter,
+// Histogram, Untyped, and Summary. Users can implement their own Metric types,
+// but that should rarely be needed. See the example for SelfCollector, which is also an
+// example for a user-implemented Metric.
+type Metric interface {
+ // Desc returns the descriptor for the Metric. This method idempotently
+ // returns the same descriptor throughout the lifetime of the
+ // Metric. The returned descriptor is immutable by contract. A Metric
+ // unable to describe itself must return an invalid descriptor (created
+ // with NewInvalidDesc).
+ Desc() *Desc
+ // Write encodes the Metric into a "Metric" Protocol Buffer data
+ // transmission object.
+ //
+ // Implementers of custom Metric types must observe concurrency safety
+ // as reads of this metric may occur at any time, and any blocking
+ // occurs at the expense of total performance of rendering all
+ // registered metrics. Ideally Metric implementations should support
+ // concurrent readers.
+ //
+ // The Prometheus client library attempts to minimize memory allocations
+ // and will provide a pre-existing reset dto.Metric pointer. Prometheus
+ // may recycle the dto.Metric proto message, so Metric implementations
+ // should just populate the provided dto.Metric and then should not keep
+ // any reference to it.
+ //
+ // While populating dto.Metric, labels must be sorted lexicographically.
+ // (Implementers may find LabelPairSorter useful for that.)
+ Write(*dto.Metric) error
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name and Help to a non-empty string. All other fields
+// are optional and can safely be left at their zero value.
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a metric
+ // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
+ // serve only special purposes. One is for the special case where the
+ // value of a label does not change during the lifetime of a process,
+ // e.g. if the revision of the running binary is put into a
+ // label. Another, more advanced purpose is if more than one Collector
+ // needs to collect Metrics with the same fully-qualified name. In that
+ // case, those Metrics must differ in the values of their
+ // ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ switch {
+ case namespace != "" && subsystem != "":
+ return strings.Join([]string{namespace, subsystem, name}, "_")
+ case namespace != "":
+ return strings.Join([]string{namespace, name}, "_")
+ case subsystem != "":
+ return strings.Join([]string{subsystem, name}, "_")
+ }
+ return name
+}
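+
+// exampleBuildFQName is an illustrative sketch added by the editor; it is not
+// part of the upstream library. The three invented components below are joined
+// into the fully-qualified name "api_http_requests_total"; empty components
+// would simply be dropped.
+func exampleBuildFQName() string {
+	return BuildFQName("api", "http", "requests_total")
+}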
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers. This is useful for implementing the Write method of
+// custom metrics.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+type hashSorter []uint64
+
+func (s hashSorter) Len() int {
+ return len(s)
+}
+
+func (s hashSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s hashSorter) Less(i, j int) bool {
+ return s[i] < s[j]
+}
+
+type invalidMetric struct {
+ desc *Desc
+ err error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+ return &invalidMetric{desc, err}
+}
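+
+// exampleReportCollectError is an illustrative sketch added by the editor; it
+// is not part of the upstream library. It shows how a Collector that fails to
+// read its data source could report the failure to the registry from its
+// Collect method instead of silently dropping the metric. ch, desc, and
+// scrapeErr stand in for values the real Collector would already have.
+func exampleReportCollectError(ch chan<- Metric, desc *Desc, scrapeErr error) {
+	ch <- NewInvalidMetric(desc, scrapeErr)
+}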
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 0000000..d8cf0ed
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "github.com/prometheus/procfs"
+
+type processCollector struct {
+ pid int
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ cpuTotal Counter
+ openFDs, maxFDs Gauge
+ vsize, rss Gauge
+ startTime Gauge
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including cpu, memory and file descriptor usage as well as
+// the process start time for the given process id under the given namespace.
+func NewProcessCollector(pid int, namespace string) *processCollector {
+ return NewProcessCollectorPIDFn(
+ func() (int, error) { return pid, nil },
+ namespace,
+ )
+}
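+
+// exampleRegisterProcessCollector is an illustrative sketch added by the
+// editor; it is not part of the upstream library. In a real program the pid
+// would normally come from os.Getpid(); a fixed placeholder value is used here
+// so the sketch needs no additional imports. The namespace "myapp" is invented.
+func exampleRegisterProcessCollector() {
+	MustRegister(NewProcessCollector(1234, "myapp"))
+}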
+
+// NewProcessCollectorPIDFn returns a collector which exports the current state
+// of process metrics including cpu, memory and file descriptor usage as well
+// as the process start time under the given namespace. The given pidFn is
+// called on each collect and is used to determine the process to export
+// metrics for.
+func NewProcessCollectorPIDFn(
+ pidFn func() (int, error),
+ namespace string,
+) *processCollector {
+ c := processCollector{
+ pidFn: pidFn,
+ collectFn: func(chan<- Metric) {},
+
+ cpuTotal: NewCounter(CounterOpts{
+ Namespace: namespace,
+ Name: "process_cpu_seconds_total",
+ Help: "Total user and system CPU time spent in seconds.",
+ }),
+ openFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_open_fds",
+ Help: "Number of open file descriptors.",
+ }),
+ maxFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_max_fds",
+ Help: "Maximum number of open file descriptors.",
+ }),
+ vsize: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_virtual_memory_bytes",
+ Help: "Virtual memory size in bytes.",
+ }),
+ rss: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_resident_memory_bytes",
+ Help: "Resident memory size in bytes.",
+ }),
+ startTime: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_start_time_seconds",
+ Help: "Start time of the process since unix epoch in seconds.",
+ }),
+ }
+
+ // Set up process metric collection if supported by the runtime.
+ if _, err := procfs.NewStat(); err == nil {
+ c.collectFn = c.processCollect
+ }
+
+ return &c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal.Desc()
+ ch <- c.openFDs.Desc()
+ ch <- c.maxFDs.Desc()
+ ch <- c.vsize.Desc()
+ ch <- c.rss.Desc()
+ ch <- c.startTime.Desc()
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+ c.collectFn(ch)
+}
+
+// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
+// client allows users to configure the error behavior.
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ return
+ }
+
+ if stat, err := p.NewStat(); err == nil {
+ c.cpuTotal.Set(stat.CPUTime())
+ ch <- c.cpuTotal
+ c.vsize.Set(float64(stat.VirtualMemory()))
+ ch <- c.vsize
+ c.rss.Set(float64(stat.ResidentMemory()))
+ ch <- c.rss
+
+ if startTime, err := stat.StartTime(); err == nil {
+ c.startTime.Set(startTime)
+ ch <- c.startTime
+ }
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ c.openFDs.Set(float64(fds))
+ ch <- c.openFDs
+ }
+
+ if limits, err := p.NewLimits(); err == nil {
+ c.maxFDs.Set(float64(limits.OpenFiles))
+ ch <- c.maxFDs
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/push.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/push.go
new file mode 100644
index 0000000..1c33848
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/push.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus
+
+// Push triggers a metric collection by the default registry and pushes all
+// collected metrics to the Pushgateway specified by url. See the Pushgateway
+// documentation for detailed implications of the job and instance
+// parameters. instance can be left empty. You can use just host:port or ip:port
+// as the url, in which case 'http://' is added automatically. You can also
+// include the scheme in the URL. However, do not include the '/metrics/jobs/...' part.
+//
+// Note that all previously pushed metrics with the same job and instance will
+// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
+// to push to the Pushgateway.)
+func Push(job, instance, url string) error {
+ return defRegistry.Push(job, instance, url, "PUT")
+}
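+
+// examplePushOnce is an illustrative sketch added by the editor; it is not
+// part of the upstream library. It pushes everything registered with the
+// default registry for a hypothetical batch job. The job name and Pushgateway
+// address are invented, and the scheme defaults to http:// because it is
+// omitted from the address.
+func examplePushOnce() error {
+	return Push("db_backup", "", "pushgateway.example.org:9091")
+}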
+
+// PushAdd works like Push, but only previously pushed metrics with the same
+// name (and the same job and instance) will be replaced. (It uses HTTP method
+// 'POST' to push to the Pushgateway.)
+func PushAdd(job, instance, url string) error {
+ return defRegistry.Push(job, instance, url, "POST")
+}
+
+// PushCollectors works like Push, but it does not collect from the default
+// registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushCollectors(job, instance, url string, collectors ...Collector) error {
+ return pushCollectors(job, instance, url, "PUT", collectors...)
+}
+
+// PushAddCollectors works like PushAdd, but it does not collect from the
+// default registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
+ return pushCollectors(job, instance, url, "POST", collectors...)
+}
+
+func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
+ r := newRegistry()
+ for _, collector := range collectors {
+ if _, err := r.Register(collector); err != nil {
+ return err
+ }
+ }
+ return r.Push(job, instance, url, method)
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 0000000..5970aae
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,726 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus
+
+import (
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/expfmt"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var (
+ defRegistry = newDefaultRegistry()
+ errAlreadyReg = errors.New("duplicate metrics collector registration attempted")
+)
+
+// Constants relevant to the HTTP interface.
+const (
+ // APIVersion is the version of the format of the exported data. This
+ // will match this library's version, which subscribes to the Semantic
+ // Versioning scheme.
+ APIVersion = "0.0.4"
+
+ // DelimitedTelemetryContentType is the content type set on telemetry
+ // data responses in delimited protobuf format.
+ DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`
+ // TextTelemetryContentType is the content type set on telemetry data
+ // responses in text format.
+ TextTelemetryContentType = `text/plain; version=` + APIVersion
+ // ProtoTextTelemetryContentType is the content type set on telemetry
+ // data responses in protobuf text format. (Only used for debugging.)
+ ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`
+ // ProtoCompactTextTelemetryContentType is the content type set on
+ // telemetry data responses in protobuf compact text format. (Only used
+ // for debugging.)
+ ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`
+
+ // Constants for object pools.
+ numBufs = 4
+ numMetricFamilies = 1000
+ numMetrics = 10000
+
+ // Capacity for the channel to collect metrics and descriptors.
+ capMetricChan = 1000
+ capDescChan = 10
+
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+
+ acceptEncodingHeader = "Accept-Encoding"
+ acceptHeader = "Accept"
+)
+
+// Handler returns the HTTP handler for the global Prometheus registry. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name). Usually the handler is used to handle the "/metrics" endpoint.
+func Handler() http.Handler {
+ return InstrumentHandler("prometheus", defRegistry)
+}
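+
+// exampleExposeMetrics is an illustrative sketch added by the editor; it is
+// not part of the upstream library. It exposes the instrumented default
+// registry on the conventional /metrics path. The listen address is invented,
+// and the error from ListenAndServe is deliberately ignored to keep the sketch
+// short.
+func exampleExposeMetrics() {
+	http.Handle("/metrics", Handler())
+	http.ListenAndServe(":8080", nil)
+}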
+
+// UninstrumentedHandler works in the same way as Handler, but the returned HTTP
+// handler is not instrumented. This is useful if no instrumentation is desired
+// (for whatever reason) or if the instrumentation has to happen with a
+// different handler name (or with a different instrumentation approach
+// altogether). See the InstrumentHandler example.
+func UninstrumentedHandler() http.Handler {
+ return defRegistry
+}
+
+// Register registers a new Collector to be included in metrics collection. It
+// returns an error if the descriptors provided by the Collector are invalid or
+// if they - in combination with descriptors of already registered Collectors -
+// do not fulfill the consistency and uniqueness criteria described in the Desc
+// documentation.
+//
+// Do not register the same Collector multiple times concurrently. (Registering
+// the same Collector twice would result in an error anyway, but on top of that,
+// it is not safe to do so concurrently.)
+func Register(m Collector) error {
+ _, err := defRegistry.Register(m)
+ return err
+}
+
+// MustRegister works like Register but panics where Register would have
+// returned an error.
+func MustRegister(m Collector) {
+ err := Register(m)
+ if err != nil {
+ panic(err)
+ }
+}
+
+// RegisterOrGet works like Register but does not return an error if a Collector
+// is registered that equals a previously registered Collector. (Two Collectors
+// are considered equal if their Describe method yields the same set of
+// descriptors.) Instead, the previously registered Collector is returned (which
+// is helpful if the new and previously registered Collectors are equal but not
+// identical, i.e. not pointers to the same object).
+//
+// As for Register, it is still not safe to call RegisterOrGet with the same
+// Collector multiple times concurrently.
+func RegisterOrGet(m Collector) (Collector, error) {
+ return defRegistry.RegisterOrGet(m)
+}
+
+// MustRegisterOrGet works like RegisterOrGet but panics where RegisterOrGet would
+// have returned an error.
+func MustRegisterOrGet(m Collector) Collector {
+ existing, err := RegisterOrGet(m)
+ if err != nil {
+ panic(err)
+ }
+ return existing
+}
+
+// Unregister unregisters the Collector that equals the Collector passed in as
+// an argument. (Two Collectors are considered equal if their Describe method
+// yields the same set of descriptors.) The function returns whether a Collector
+// was unregistered.
+func Unregister(c Collector) bool {
+ return defRegistry.Unregister(c)
+}
+
+// SetMetricFamilyInjectionHook sets a function that is called whenever metrics
+// are collected. The hook function must be set before metrics collection begins
+// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The
+// MetricFamily protobufs returned by the hook function are merged with the
+// metrics collected in the usual way.
+//
+// This is a way to directly inject MetricFamily protobufs managed and owned by
+// the caller. The caller has full responsibility. As no registration of the
+// injected metrics has happened, there is no descriptor to check against, and
+// there are no registration-time checks. If collect-time checks are disabled
+// (see function EnableCollectChecks), no sanity checks are performed on the
+// returned protobufs at all. If collect-checks are enabled, type and uniqueness
+// checks are performed, but no further consistency checks (which would require
+// knowledge of a metric descriptor).
+//
+// Sorting concerns: The caller is responsible for sorting the label pairs in
+// each metric. However, the order of metrics will be sorted by the registry as
+// it is required anyway after merging with the metric families collected
+// conventionally.
+//
+// The function must be callable at any time and concurrently.
+func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
+ defRegistry.metricFamilyInjectionHook = hook
+}
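+
+// exampleInjectionHook is an illustrative sketch added by the editor; it is
+// not part of the upstream library. It installs a hook that injects one
+// untyped, caller-owned MetricFamily on every collection. The metric name and
+// value are invented, and the caller remains responsible for the validity of
+// the injected protobufs.
+func exampleInjectionHook() {
+	SetMetricFamilyInjectionHook(func() []*dto.MetricFamily {
+		return []*dto.MetricFamily{
+			{
+				Name: proto.String("example_external_blob_count"),
+				Help: proto.String("Illustrative externally managed metric."),
+				Type: dto.MetricType_UNTYPED.Enum(),
+				Metric: []*dto.Metric{
+					{Untyped: &dto.Untyped{Value: proto.Float64(42)}},
+				},
+			},
+		}
+	})
+}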
+
+// PanicOnCollectError sets the behavior whether a panic is caused upon an error
+// while metrics are collected and served to the HTTP endpoint. By default, an
+// internal server error (status code 500) is served with an error message.
+func PanicOnCollectError(b bool) {
+ defRegistry.panicOnCollectError = b
+}
+
+// EnableCollectChecks enables (or disables) additional consistency checks
+// during metrics collection. These additional checks are not enabled by default
+// because they inflict a performance penalty and the errors they check for can
+// only happen if the used Metric and Collector types have internal programming
+// errors. It can be helpful to enable these checks while working with custom
+// Collectors or Metrics whose correctness is not well established yet.
+func EnableCollectChecks(b bool) {
+ defRegistry.collectChecksEnabled = b
+}
+
+// encoder is a function that writes a dto.MetricFamily to an io.Writer in a
+// certain encoding. It returns the number of bytes written and any error
+// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText
+// are encoders.
+type encoder func(io.Writer, *dto.MetricFamily) (int, error)
+
+type registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ bufPool chan *bytes.Buffer
+ metricFamilyPool chan *dto.MetricFamily
+ metricPool chan *dto.Metric
+ metricFamilyInjectionHook func() []*dto.MetricFamily
+
+ panicOnCollectError, collectChecksEnabled bool
+}
+
+func (r *registry) Register(c Collector) (Collector, error) {
+ descChan := make(chan *Desc, capDescChan)
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+
+ newDescIDs := map[uint64]struct{}{}
+ newDimHashesByName := map[string]uint64{}
+ var collectorID uint64 // Just a sum of all desc IDs.
+ var duplicateDescErr error
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+	// Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, add it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID += desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ } else {
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ } else {
+ newDimHashesByName[desc.fqName] = desc.dimHash
+ }
+ }
+ }
+ // Did anything happen at all?
+ if len(newDescIDs) == 0 {
+ return nil, errors.New("collector has no descriptors")
+ }
+ if existing, exists := r.collectorsByID[collectorID]; exists {
+ return existing, errAlreadyReg
+ }
+ // If the collectorID is new, but at least one of the descs existed
+ // before, we are in trouble.
+ if duplicateDescErr != nil {
+ return nil, duplicateDescErr
+ }
+
+ // Only after all tests have passed, actually register.
+ r.collectorsByID[collectorID] = c
+ for hash := range newDescIDs {
+ r.descIDs[hash] = struct{}{}
+ }
+ for name, dimHash := range newDimHashesByName {
+ r.dimHashesByName[name] = dimHash
+ }
+ return c, nil
+}
+
+func (r *registry) RegisterOrGet(m Collector) (Collector, error) {
+ existing, err := r.Register(m)
+ if err != nil && err != errAlreadyReg {
+ return nil, err
+ }
+ return existing, nil
+}
+
+func (r *registry) Unregister(c Collector) bool {
+ descChan := make(chan *Desc, capDescChan)
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+
+ descIDs := map[uint64]struct{}{}
+ var collectorID uint64 // Just a sum of the desc IDs.
+ for desc := range descChan {
+ if _, exists := descIDs[desc.id]; !exists {
+ collectorID += desc.id
+ descIDs[desc.id] = struct{}{}
+ }
+ }
+
+ r.mtx.RLock()
+ if _, exists := r.collectorsByID[collectorID]; !exists {
+ r.mtx.RUnlock()
+ return false
+ }
+ r.mtx.RUnlock()
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ delete(r.collectorsByID, collectorID)
+ for id := range descIDs {
+ delete(r.descIDs, id)
+ }
+ // dimHashesByName is left untouched as those must be consistent
+ // throughout the lifetime of a program.
+ return true
+}
+
+func (r *registry) Push(job, instance, pushURL, method string) error {
+ if !strings.Contains(pushURL, "://") {
+ pushURL = "http://" + pushURL
+ }
+ pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job))
+ if instance != "" {
+ pushURL += "/instances/" + url.QueryEscape(instance)
+ }
+ buf := r.getBuf()
+ defer r.giveBuf(buf)
+ if err := r.writePB(expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)); err != nil {
+ if r.panicOnCollectError {
+ panic(err)
+ }
+ return err
+ }
+ req, err := http.NewRequest(method, pushURL, buf)
+ if err != nil {
+ return err
+ }
+ req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 202 {
+ return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL)
+ }
+ return nil
+}
+
+func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ contentType := expfmt.Negotiate(req.Header)
+ buf := r.getBuf()
+ defer r.giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf)
+ if err := r.writePB(expfmt.NewEncoder(writer, contentType)); err != nil {
+ if r.panicOnCollectError {
+ panic(err)
+ }
+ http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+}
+
+func (r *registry) writePB(encoder expfmt.Encoder) error {
+ var metricHashes map[uint64]struct{}
+ if r.collectChecksEnabled {
+ metricHashes = make(map[uint64]struct{})
+ }
+ metricChan := make(chan Metric, capMetricChan)
+ wg := sync.WaitGroup{}
+
+ r.mtx.RLock()
+ metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+
+ // Scatter.
+ // (Collectors could be complex and slow, so we call them all at once.)
+ wg.Add(len(r.collectorsByID))
+ go func() {
+ wg.Wait()
+ close(metricChan)
+ }()
+ for _, collector := range r.collectorsByID {
+ go func(collector Collector) {
+ defer wg.Done()
+ collector.Collect(metricChan)
+ }(collector)
+ }
+ r.mtx.RUnlock()
+
+ // Drain metricChan in case of premature return.
+ defer func() {
+ for _ = range metricChan {
+ }
+ }()
+
+ // Gather.
+ for metric := range metricChan {
+ // This could be done concurrently, too, but it required locking
+ // of metricFamiliesByName (and of metricHashes if checks are
+ // enabled). Most likely not worth it.
+ desc := metric.Desc()
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if !ok {
+ metricFamily = r.getMetricFamily()
+ defer r.giveMetricFamily(metricFamily)
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ metricFamiliesByName[desc.fqName] = metricFamily
+ }
+ dtoMetric := r.getMetric()
+ defer r.giveMetric(dtoMetric)
+ if err := metric.Write(dtoMetric); err != nil {
+ // TODO: Consider different means of error reporting so
+ // that a single erroneous metric could be skipped
+ // instead of blowing up the whole collection.
+ return fmt.Errorf("error collecting metric %v: %s", desc, err)
+ }
+ switch {
+ case metricFamily.Type != nil:
+ // Type already set. We are good.
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ return fmt.Errorf("empty metric collected: %s", dtoMetric)
+ }
+ if r.collectChecksEnabled {
+ if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil {
+ return err
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ }
+
+ if r.metricFamilyInjectionHook != nil {
+ for _, mf := range r.metricFamilyInjectionHook() {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if !exists {
+ metricFamiliesByName[mf.GetName()] = mf
+ if r.collectChecksEnabled {
+ for _, m := range mf.Metric {
+ if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil {
+ return err
+ }
+ }
+ }
+ continue
+ }
+ for _, m := range mf.Metric {
+ if r.collectChecksEnabled {
+ if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil {
+ return err
+ }
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+
+ // Now that MetricFamilies are all set, sort their Metrics
+ // lexicographically by their label values.
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+
+ // Write out MetricFamilies sorted by their name.
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name := range metricFamiliesByName {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+
+ for _, name := range names {
+ if err := encoder.Encode(metricFamiliesByName[name]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error {
+
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s is not a %s",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ h := fnv.New64a()
+ var buf bytes.Buffer
+ buf.WriteString(metricFamily.GetName())
+ buf.WriteByte(separatorByte)
+ h.Write(buf.Bytes())
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check. Label pairs must be sorted by contract. But the point of this
+ // method is to check for contract violations. So we better do the sort
+ // now.
+ sort.Sort(LabelPairSorter(dtoMetric.Label))
+ for _, lp := range dtoMetric.Label {
+ buf.Reset()
+ buf.WriteString(lp.GetValue())
+ buf.WriteByte(separatorByte)
+ h.Write(buf.Bytes())
+ }
+ metricHash := h.Sum64()
+ if _, exists := metricHashes[metricHash]; exists {
+ return fmt.Errorf(
+ "collected metric %s %s was collected before with the same name and label values",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ metricHashes[metricHash] = struct{}{}
+
+ if desc == nil {
+ return nil // Nothing left to check if we have no desc.
+ }
+
+ // Desc consistency with metric family.
+ if metricFamily.GetName() != desc.fqName {
+ return fmt.Errorf(
+ "collected metric %s %s has name %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName,
+ )
+ }
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+ )
+ }
+
+ // Is the desc consistent with the content of the metric?
+ lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
+ lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+ for _, l := range desc.variableLabels {
+ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+ Name: proto.String(l),
+ })
+ }
+ if len(lpsFromDesc) != len(dtoMetric.Label) {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ sort.Sort(LabelPairSorter(lpsFromDesc))
+ for i, lpFromDesc := range lpsFromDesc {
+ lpFromMetric := dtoMetric.Label[i]
+ if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+ lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ }
+
+ r.mtx.RLock() // Remaining checks need the read lock.
+ defer r.mtx.RUnlock()
+
+ // Is the desc registered?
+ if _, exist := r.descIDs[desc.id]; !exist {
+ return fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+
+ return nil
+}
+
+func (r *registry) getBuf() *bytes.Buffer {
+ select {
+ case buf := <-r.bufPool:
+ return buf
+ default:
+ return &bytes.Buffer{}
+ }
+}
+
+func (r *registry) giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ select {
+ case r.bufPool <- buf:
+ default:
+ }
+}
+
+func (r *registry) getMetricFamily() *dto.MetricFamily {
+ select {
+ case mf := <-r.metricFamilyPool:
+ return mf
+ default:
+ return &dto.MetricFamily{}
+ }
+}
+
+func (r *registry) giveMetricFamily(mf *dto.MetricFamily) {
+ mf.Reset()
+ select {
+ case r.metricFamilyPool <- mf:
+ default:
+ }
+}
+
+func (r *registry) getMetric() *dto.Metric {
+ select {
+ case m := <-r.metricPool:
+ return m
+ default:
+ return &dto.Metric{}
+ }
+}
+
+func (r *registry) giveMetric(m *dto.Metric) {
+ m.Reset()
+ select {
+ case r.metricPool <- m:
+ default:
+ }
+}
+
+func newRegistry() *registry {
+ return &registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ bufPool: make(chan *bytes.Buffer, numBufs),
+ metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies),
+ metricPool: make(chan *dto.Metric, numMetrics),
+ }
+}
+
+func newDefaultRegistry() *registry {
+ r := newRegistry()
+ r.Register(NewProcessCollector(os.Getpid(), ""))
+ r.Register(NewGoCollector())
+ return r
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
+
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+		// inconsistent. However, we have to deal with it, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+ return true
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 0000000..fe81e00
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,540 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "hash/fnv"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/beorn7/perks/quantile"
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the summary.
+ Observe(float64)
+}
+
+var (
+ // DefObjectives are the default Summary quantile values.
+ DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+ errQuantileLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in summaries", quantileLabel,
+ )
+)
+
+// Default values for SummaryOpts.
+const (
+ // DefMaxAge is the default duration for which observations stay
+ // relevant.
+ DefMaxAge time.Duration = 10 * time.Minute
+ // DefAgeBuckets is the default number of buckets used to calculate the
+ // age of observations.
+ DefAgeBuckets = 5
+ // DefBufCap is the standard buffer size for collecting Summary observations.
+ DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type SummaryOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Summary (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Summary must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Summary. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Summary. Summaries with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // SummaryVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Summaries with the same fully-qualified
+ // name. In that case, those Summaries must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Objectives defines the quantile rank estimates with their respective
+ // absolute error. If Objectives[q] = e, then the value reported
+ // for q will be the φ-quantile value for some φ between q-e and q+e.
+ // The default value is DefObjectives.
+ Objectives map[float64]float64
+
+ // MaxAge defines the duration for which an observation stays relevant
+ // for the summary. Must be positive. The default value is DefMaxAge.
+ MaxAge time.Duration
+
+ // AgeBuckets is the number of buckets used to exclude observations that
+ // are older than MaxAge from the summary. A higher number has a
+ // resource penalty, so only increase it if the higher resolution is
+ // really required. For very high observation rates, you might want to
+ // reduce the number of age buckets. With only one age bucket, you will
+ // effectively see a complete reset of the summary each time MaxAge has
+ // passed. The default value is DefAgeBuckets.
+ AgeBuckets uint32
+
+ // BufCap defines the default sample stream buffer size. The default
+ // value of DefBufCap should suffice for most uses. If there is a need
+ // to increase the value, a multiple of 500 is recommended (because that
+ // is the internal buffer size of the underlying package
+ // "github.com/bmizerany/perks/quantile").
+ BufCap uint32
+}
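+
+// A hedged example of overriding the optional fields (illustrative only, not
+// upstream code): tighter quantile targets and a shorter decay window than
+// the defaults.
+//
+//	opts := SummaryOpts{
+//		Name:       "rpc_latency_seconds",
+//		Help:       "RPC latency distributions.",
+//		Objectives: map[float64]float64{0.5: 0.05, 0.95: 0.005, 0.99: 0.001},
+//		MaxAge:     5 * time.Minute,
+//		AgeBuckets: 3,
+//	}
+//	_ = NewSummary(opts)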
+
+// TODO: There is a serious problem with the sliding-window decay algorithm:
+// the Merge method of perks/quantile is not working as advertised - and it
+// might be unfixable, as the underlying algorithm is apparently not capable of
+// merging summaries in the first place. To avoid using Merge, we are currently
+// adding observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+ return newSummary(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Objectives) == 0 {
+ opts.Objectives = DefObjectives
+ }
+
+ if opts.MaxAge < 0 {
+ panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+ }
+ if opts.MaxAge == 0 {
+ opts.MaxAge = DefMaxAge
+ }
+
+ if opts.AgeBuckets == 0 {
+ opts.AgeBuckets = DefAgeBuckets
+ }
+
+ if opts.BufCap == 0 {
+ opts.BufCap = DefBufCap
+ }
+
+ s := &summary{
+ desc: desc,
+
+ objectives: opts.Objectives,
+ sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+ labelPairs: makeLabelPairs(desc, labelValues),
+
+ hotBuf: make([]float64, 0, opts.BufCap),
+ coldBuf: make([]float64, 0, opts.BufCap),
+ streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+ }
+ s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.hotBufExpTime = s.headStreamExpTime
+
+ for i := uint32(0); i < opts.AgeBuckets; i++ {
+ s.streams = append(s.streams, s.newStream())
+ }
+ s.headStream = s.streams[0]
+
+ for qu := range s.objectives {
+ s.sortedObjectives = append(s.sortedObjectives, qu)
+ }
+ sort.Float64s(s.sortedObjectives)
+
+ s.Init(s) // Init self-collection.
+ return s
+}
+
+type summary struct {
+ SelfCollector
+
+ bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+ mtx sync.Mutex // Protects every other moving part.
+ // Lock bufMtx before mtx if both are needed.
+
+ desc *Desc
+
+ objectives map[float64]float64
+ sortedObjectives []float64
+
+ labelPairs []*dto.LabelPair
+
+ sum float64
+ cnt uint64
+
+ hotBuf, coldBuf []float64
+
+ streams []*quantile.Stream
+ streamDuration time.Duration
+ headStream *quantile.Stream
+ headStreamIdx int
+ headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+ s.bufMtx.Lock()
+ defer s.bufMtx.Unlock()
+
+ now := time.Now()
+ if now.After(s.hotBufExpTime) {
+ s.asyncFlush(now)
+ }
+ s.hotBuf = append(s.hotBuf, v)
+ if len(s.hotBuf) == cap(s.hotBuf) {
+ s.asyncFlush(now)
+ }
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+ s.bufMtx.Lock()
+ s.mtx.Lock()
+ // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+ s.swapBufs(time.Now())
+ s.bufMtx.Unlock()
+
+ s.flushColdBuf()
+ sum.SampleCount = proto.Uint64(s.cnt)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for _, rank := range s.sortedObjectives {
+ var q float64
+ if s.headStream.Count() == 0 {
+ q = math.NaN()
+ } else {
+ q = s.headStream.Query(rank)
+ }
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ s.mtx.Unlock()
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+ return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+ return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+ s.mtx.Lock()
+ s.swapBufs(now)
+
+ // Unblock the original goroutine that was responsible for the mutation
+ // that triggered the compaction. But hold onto the global non-buffer
+ // state mutex until the operation finishes.
+ go func() {
+ s.flushColdBuf()
+ s.mtx.Unlock()
+ }()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+ for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+ s.headStream.Reset()
+ s.headStreamIdx++
+ if s.headStreamIdx >= len(s.streams) {
+ s.headStreamIdx = 0
+ }
+ s.headStream = s.streams[s.headStreamIdx]
+ s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+ }
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+ for _, v := range s.coldBuf {
+ for _, stream := range s.streams {
+ stream.Insert(v)
+ }
+ s.cnt++
+ s.sum += v
+ }
+ s.coldBuf = s.coldBuf[0:0]
+ s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+ if len(s.coldBuf) != 0 {
+ panic("coldBuf is not empty")
+ }
+ s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+ // hotBuf is now empty and gets new expiration set.
+ for now.After(s.hotBufExpTime) {
+ s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+ }
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+ return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+ return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+ MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &SummaryVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newSummary(desc, opts, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Summary and not a
+// Metric so that no type conversion is required.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Summary and not a Metric so that no
+// type conversion is required.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
+ return m.MetricVec.WithLabelValues(lvs...).(Summary)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *SummaryVec) With(labels Labels) Summary {
+ return m.MetricVec.With(labels).(Summary)
+}
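+
+// Illustrative sketch (not upstream code): partition one Summary by HTTP
+// method and status code via a SummaryVec.
+//
+//	latencies := NewSummaryVec(
+//		SummaryOpts{
+//			Name: "http_request_duration_seconds",
+//			Help: "HTTP request latencies, partitioned by method and code.",
+//		},
+//		[]string{"method", "code"},
+//	)
+//	latencies.WithLabelValues("GET", "200").Observe(0.042)
+//	latencies.With(Labels{"method": "POST", "code": "404"}).Observe(0.013)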
+
+type constSummary struct {
+ desc *Desc
+ count uint64
+ sum float64
+ quantiles map[float64]float64
+ labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+ sum.SampleCount = proto.Uint64(s.count)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for rank, q := range s.quantiles {
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
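+
+// A hedged sketch of the intended use (not part of the upstream file): a
+// custom Collector translating externally gathered statistics into a
+// throw-away summary inside Collect. The myCollector type and the numbers
+// are placeholders.
+//
+//	func (c *myCollector) Collect(ch chan<- Metric) {
+//		ch <- MustNewConstSummary(
+//			c.desc,
+//			4711,   // observation count
+//			403.34, // sum of all observations
+//			map[float64]float64{0.5: 0.23, 0.99: 0.56},
+//		)
+//	}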
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 0000000..c65ab1c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "hash/fnv"
+
+// Untyped is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// An Untyped metric works the same as a Gauge. The only difference is that no
+// type information is implied.
+//
+// To create Untyped instances, use NewUntyped.
+type Untyped interface {
+ Metric
+ Collector
+
+ // Set sets the Untyped metric to an arbitrary value.
+ Set(float64)
+ // Inc increments the Untyped metric by 1.
+ Inc()
+ // Dec decrements the Untyped metric by 1.
+ Dec()
+ // Add adds the given value to the Untyped metric. (The value can be
+ // negative, resulting in a decrease.)
+ Add(float64)
+ // Sub subtracts the given value from the Untyped metric. (The value can
+ // be negative, resulting in an increase.)
+ Sub(float64)
+}
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+func NewUntyped(opts UntypedOpts) Untyped {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, 0)
+}
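+
+// Illustrative sketch (not upstream code): an Untyped metric behaves like a
+// Gauge but announces no type to Prometheus.
+//
+//	inFlight := NewUntyped(UntypedOpts{
+//		Name: "queue_in_flight_items",
+//		Help: "Items currently being processed.",
+//	})
+//	inFlight.Inc()
+//	inFlight.Add(3)
+//	inFlight.Dec()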
+
+// UntypedVec is a Collector that bundles a set of Untyped metrics that all
+// share the same Desc, but have different values for their variable
+// labels. This is used if you want to count the same thing partitioned by
+// various dimensions. Create instances with NewUntypedVec.
+type UntypedVec struct {
+ MetricVec
+}
+
+// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &UntypedVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newValue(desc, UntypedValue, 0, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Untyped and not a
+// Metric so that no type conversion is required.
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Untyped and not a Metric so that no
+// type conversion is required.
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
+ return m.MetricVec.WithLabelValues(lvs...).(Untyped)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *UntypedVec) With(labels Labels) Untyped {
+ return m.MetricVec.With(labels).(Untyped)
+}
+
+// UntypedFunc is an Untyped whose value is determined at collect time by
+// calling a provided function.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+ Metric
+ Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, function)
+}
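+
+// A hedged sketch (not part of the upstream file): expose a value that is
+// read lazily at collect time, e.g. from the Go runtime. The provided
+// function must be safe for concurrent use.
+//
+//	goroutines := NewUntypedFunc(
+//		UntypedOpts{
+//			Name: "process_goroutines",
+//			Help: "Number of goroutines that currently exist.",
+//		},
+//		func() float64 { return float64(runtime.NumGoroutine()) },
+//	)
+//	_ = goroutines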
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/value.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 0000000..b54ac11
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,234 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+// value is a generic metric for simple values. It implements Metric, Collector,
+// Counter, Gauge, and Untyped. Its effective type is determined by
+// ValueType. This is a low-level building block used by the library to back the
+// implementations of Counter, Gauge, and Untyped.
+type value struct {
+ // valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ SelfCollector
+
+ desc *Desc
+ valType ValueType
+ labelPairs []*dto.LabelPair
+}
+
+// newValue returns a newly allocated value with the given Desc, ValueType,
+// sample value and label values. It panics if the number of label
+// values is different from the number of variable labels in Desc.
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
+ if len(labelValues) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &value{
+ desc: desc,
+ valType: valueType,
+ valBits: math.Float64bits(val),
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ result.Init(result)
+ return result
+}
+
+func (v *value) Desc() *Desc {
+ return v.desc
+}
+
+func (v *value) Set(val float64) {
+ atomic.StoreUint64(&v.valBits, math.Float64bits(val))
+}
+
+func (v *value) Inc() {
+ v.Add(1)
+}
+
+func (v *value) Dec() {
+ v.Add(-1)
+}
+
+func (v *value) Add(val float64) {
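+ // Classic compare-and-swap retry loop: read the current bits, compute the
+ // updated float64, and swap only if no other goroutine changed valBits in
+ // the meantime; otherwise retry.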
+ for {
+ oldBits := atomic.LoadUint64(&v.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (v *value) Sub(val float64) {
+ v.Add(val * -1)
+}
+
+func (v *value) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
+ return populateMetric(v.valType, val, v.labelPairs, out)
+}
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+ SelfCollector
+
+ desc *Desc
+ valType ValueType
+ function func() float64
+ labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+ result := &valueFunc{
+ desc: desc,
+ valType: valueType,
+ function: function,
+ labelPairs: makeLabelPairs(desc, nil),
+ }
+ result.Init(result)
+ return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+ return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+ return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constMetric{
+ desc: desc,
+ valType: valueType,
+ val: value,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ m, err := NewConstMetric(desc, valueType, value, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
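+
+// Illustrative sketch (not upstream code): emit a fixed counter value from a
+// custom Collector. The descriptor name and the value are placeholders.
+//
+//	desc := NewDesc(
+//		"filesystem_reads_total",
+//		"Total reads served, scraped from an external source.",
+//		nil, nil,
+//	)
+//	m := MustNewConstMetric(desc, CounterValue, 1027)
+//	_ = m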
+
+type constMetric struct {
+ desc *Desc
+ valType ValueType
+ val float64
+ labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+ return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+ return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+ t ValueType,
+ v float64,
+ labelPairs []*dto.LabelPair,
+ m *dto.Metric,
+) error {
+ m.Label = labelPairs
+ switch t {
+ case CounterValue:
+ m.Counter = &dto.Counter{Value: proto.Float64(v)}
+ case GaugeValue:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+ case UntypedValue:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+ default:
+ return fmt.Errorf("encountered unknown type %v", t)
+ }
+ return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+ totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ if totalLen == 0 {
+ // Super fast path.
+ return nil
+ }
+ if len(desc.variableLabels) == 0 {
+ // Moderately fast path.
+ return desc.constLabelPairs
+ }
+ labelPairs := make([]*dto.LabelPair, 0, totalLen)
+ for i, n := range desc.variableLabels {
+ labelPairs = append(labelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(labelValues[i]),
+ })
+ }
+ for _, lp := range desc.constLabelPairs {
+ labelPairs = append(labelPairs, lp)
+ }
+ sort.Sort(LabelPairSorter(labelPairs))
+ return labelPairs
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 0000000..a1f3bdf
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,247 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bytes"
+ "fmt"
+ "hash"
+ "sync"
+)
+
+// MetricVec is a Collector to bundle metrics of the same name that
+// differ in their label values. MetricVec is usually not used directly but as a
+// building block for implementations of vectors of a given metric
+// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
+// provided in this package.
+type MetricVec struct {
+ mtx sync.RWMutex // Protects not only children, but also hash and buf.
+ children map[uint64]Metric
+ desc *Desc
+
+ // hash is our own hash instance to avoid repeated allocations.
+ hash hash.Hash64
+ // buf is used to copy string contents into it for hashing,
+ // again to avoid allocations.
+ buf bytes.Buffer
+
+ newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It always sends exactly one Desc to the
+// provided channel.
+func (m *MetricVec) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metric := range m.children {
+ ch <- metric
+ }
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created.
+//
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it at its start value (e.g. a Summary or
+// Histogram without any observations). See also the SummaryVec example.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return nil, err
+ }
+ return m.getOrCreateMetric(h, lvs...), nil
+}
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return nil, err
+ }
+ lvs := make([]string, len(labels))
+ for i, label := range m.desc.variableLabels {
+ lvs[i] = labels[label]
+ }
+ return m.getOrCreateMetric(h, lvs...), nil
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
+// occurs. The method allows neat syntax like:
+// httpReqs.WithLabelValues("404", "POST").Inc()
+func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
+ metric, err := m.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// With works as GetMetricWith, but panics if an error occurs. The method allows
+// neat syntax like:
+// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
+func (m *MetricVec) With(labels Labels) Metric {
+ metric, err := m.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
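+
+// A hedged usage sketch (not upstream code): the Get... variants return an
+// error instead of panicking, which is preferable when label values come from
+// untrusted input. The vec variable is a placeholder for any MetricVec-backed
+// vector.
+//
+//	if metric, err := vec.GetMetricWithLabelValues("GET", "200"); err == nil {
+//		_ = metric // use the child metric
+//	} else {
+//		// wrong number of label values or another inconsistency
+//	}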
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such an inconsistent label count
+// can never match an actual Metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
+ if _, has := m.children[h]; !has {
+ return false
+ }
+ delete(m.children, h)
+ return true
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in the Desc of the MetricVec. However, such
+// inconsistent Labels can never match an actual Metric, so the method will
+// always return false in that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
+ if _, has := m.children[h]; !has {
+ return false
+ }
+ delete(m.children, h)
+ return true
+}
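+
+// Illustrative sketch (not upstream code): drop the child metrics of a label
+// combination that is no longer relevant, e.g. after a worker shuts down. The
+// vec variable is a placeholder.
+//
+//	deleted := vec.DeleteLabelValues("worker-17")
+//	_ = deleted // false if no metric with these label values existed
+//	vec.Delete(Labels{"worker": "worker-17"})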
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ for h := range m.children {
+ delete(m.children, h)
+ }
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+ if len(vals) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ m.hash.Reset()
+ for _, val := range vals {
+ m.buf.Reset()
+ m.buf.WriteString(val)
+ m.hash.Write(m.buf.Bytes())
+ }
+ return m.hash.Sum64(), nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+ if len(labels) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ m.hash.Reset()
+ for _, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ m.buf.Reset()
+ m.buf.WriteString(val)
+ m.hash.Write(m.buf.Bytes())
+ }
+ return m.hash.Sum64(), nil
+}
+
+func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
+ metric, ok := m.children[hash]
+ if !ok {
+ // Copy labelValues here (rather than earlier) so that the copy is only
+ // allocated if we actually go down this code path.
+ copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
+ metric = m.newMetric(copiedLabelValues...)
+ m.children[hash] = metric
+ }
+ return metric
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_model/LICENSE b/src/kube2msb/vendor/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_model/NOTICE b/src/kube2msb/vendor/github.com/prometheus/client_model/NOTICE
new file mode 100644
index 0000000..20110e4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_model/NOTICE
@@ -0,0 +1,5 @@
+Data model artifacts for Prometheus.
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/src/kube2msb/vendor/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 0000000..b065f86
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,364 @@
+// Code generated by protoc-gen-go.
+// source: metrics.proto
+// DO NOT EDIT!
+
+/*
+Package io_prometheus_client is a generated protocol buffer package.
+
+It is generated from these files:
+ metrics.proto
+
+It has these top-level messages:
+ LabelPair
+ Gauge
+ Counter
+ Quantile
+ Summary
+ Untyped
+ Histogram
+ Bucket
+ Metric
+ MetricFamily
+*/
+package io_prometheus_client
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MetricType int32
+
+const (
+ MetricType_COUNTER MetricType = 0
+ MetricType_GAUGE MetricType = 1
+ MetricType_SUMMARY MetricType = 2
+ MetricType_UNTYPED MetricType = 3
+ MetricType_HISTOGRAM MetricType = 4
+)
+
+var MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+}
+var MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+}
+
+func (x MetricType) Enum() *MetricType {
+ p := new(MetricType)
+ *p = x
+ return p
+}
+func (x MetricType) String() string {
+ return proto.EnumName(MetricType_name, int32(x))
+}
+func (x *MetricType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+ if err != nil {
+ return err
+ }
+ *x = MetricType(value)
+ return nil
+}
+
+type LabelPair struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (m *LabelPair) String() string { return proto.CompactTextString(m) }
+func (*LabelPair) ProtoMessage() {}
+
+func (m *LabelPair) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *LabelPair) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Gauge struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Gauge) Reset() { *m = Gauge{} }
+func (m *Gauge) String() string { return proto.CompactTextString(m) }
+func (*Gauge) ProtoMessage() {}
+
+func (m *Gauge) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Counter struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Counter) Reset() { *m = Counter{} }
+func (m *Counter) String() string { return proto.CompactTextString(m) }
+func (*Counter) ProtoMessage() {}
+
+func (m *Counter) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Quantile struct {
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Quantile) Reset() { *m = Quantile{} }
+func (m *Quantile) String() string { return proto.CompactTextString(m) }
+func (*Quantile) ProtoMessage() {}
+
+func (m *Quantile) GetQuantile() float64 {
+ if m != nil && m.Quantile != nil {
+ return *m.Quantile
+ }
+ return 0
+}
+
+func (m *Quantile) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Summary struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Summary) Reset() { *m = Summary{} }
+func (m *Summary) String() string { return proto.CompactTextString(m) }
+func (*Summary) ProtoMessage() {}
+
+func (m *Summary) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Summary) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Summary) GetQuantile() []*Quantile {
+ if m != nil {
+ return m.Quantile
+ }
+ return nil
+}
+
+type Untyped struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Untyped) Reset() { *m = Untyped{} }
+func (m *Untyped) String() string { return proto.CompactTextString(m) }
+func (*Untyped) ProtoMessage() {}
+
+func (m *Untyped) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Histogram struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage() {}
+
+func (m *Histogram) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Histogram) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Histogram) GetBucket() []*Bucket {
+ if m != nil {
+ return m.Bucket
+ }
+ return nil
+}
+
+type Bucket struct {
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Bucket) Reset() { *m = Bucket{} }
+func (m *Bucket) String() string { return proto.CompactTextString(m) }
+func (*Bucket) ProtoMessage() {}
+
+func (m *Bucket) GetCumulativeCount() uint64 {
+ if m != nil && m.CumulativeCount != nil {
+ return *m.CumulativeCount
+ }
+ return 0
+}
+
+func (m *Bucket) GetUpperBound() float64 {
+ if m != nil && m.UpperBound != nil {
+ return *m.UpperBound
+ }
+ return 0
+}
+
+type Metric struct {
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+
+func (m *Metric) GetLabel() []*LabelPair {
+ if m != nil {
+ return m.Label
+ }
+ return nil
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if m != nil {
+ return m.Gauge
+ }
+ return nil
+}
+
+func (m *Metric) GetCounter() *Counter {
+ if m != nil {
+ return m.Counter
+ }
+ return nil
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func (m *Metric) GetUntyped() *Untyped {
+ if m != nil {
+ return m.Untyped
+ }
+ return nil
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+func (m *Metric) GetTimestampMs() int64 {
+ if m != nil && m.TimestampMs != nil {
+ return *m.TimestampMs
+ }
+ return 0
+}
+
+type MetricFamily struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MetricFamily) Reset() { *m = MetricFamily{} }
+func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
+func (*MetricFamily) ProtoMessage() {}
+
+func (m *MetricFamily) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetHelp() string {
+ if m != nil && m.Help != nil {
+ return *m.Help
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetType() MetricType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return MetricType_COUNTER
+}
+
+func (m *MetricFamily) GetMetric() []*Metric {
+ if m != nil {
+ return m.Metric
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/LICENSE b/src/kube2msb/vendor/github.com/prometheus/common/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/NOTICE b/src/kube2msb/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 0000000..636a2c1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/expfmt/decode.go b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 0000000..a98696d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,433 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return FmtUnknown
+ }
+
+ const (
+ textType = "text/plain"
+ jsonType = "application/json"
+ )
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return FmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return FmtUnknown
+ }
+ return FmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return FmtUnknown
+ }
+ return FmtText
+
+ case jsonType:
+ var prometheusAPIVersion string
+
+ if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
+ prometheusAPIVersion = params["version"]
+ } else {
+ prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
+ }
+
+ switch prometheusAPIVersion {
+ case "0.0.2", "":
+ return fmtJSON2
+ default:
+ return FmtUnknown
+ }
+ }
+
+ return FmtUnknown
+}
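+
+// A minimal sketch of the mapping above, assuming the standard text exposition
+// content type; the header value is illustrative:
+//
+//	h := http.Header{}
+//	h.Set("Content-Type", "text/plain; version=0.0.4")
+//	fmt.Println(ResponseFormat(h) == FmtText) // prints true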
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format {
+ case FmtProtoDelim:
+ return &protoDecoder{r: r}
+ case fmtJSON2:
+ return newJSON2Decoder(r)
+ }
+ return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ if err != nil {
+ return err
+ }
+ if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ return fmt.Errorf("invalid metric name %q", v.GetName())
+ }
+ for _, m := range v.GetMetric() {
+ if m == nil {
+ continue
+ }
+ for _, l := range m.GetLabel() {
+ if l == nil {
+ continue
+ }
+ if !model.LabelValue(l.GetValue()).IsValid() {
+ return fmt.Errorf("invalid label value %q", l.GetValue())
+ }
+ if !model.LabelName(l.GetName()).IsValid() {
+ return fmt.Errorf("invalid label name %q", l.GetName())
+ }
+ }
+ }
+ return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ d.fams = make([]*dto.MetricFamily, 0, len(fams))
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ if err := sd.Dec.Decode(&sd.f); err != nil {
+ return err
+ }
+ *s = extractSamples(&sd.f, sd.Opts)
+ return nil
+}
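+
+// A minimal usage sketch for the decoding API above, assuming text format
+// input; `in` and the sample line are illustrative:
+//
+//	in := strings.NewReader("some_counter 42\n")
+//	dec := &SampleDecoder{
+//		Dec:  NewDecoder(in, FmtText),
+//		Opts: &DecodeOptions{Timestamp: model.Now()},
+//	}
+//	var samples model.Vector
+//	if err := dec.Decode(&samples); err != nil {
+//		// handle error (io.EOF signals end of input)
+//	}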
+
+// ExtractSamples builds a slice of samples from the provided metric families.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
+ var all model.Vector
+ for _, f := range fams {
+ all = append(all, extractSamples(f, o)...)
+ }
+ return all
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+ switch f.GetType() {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f)
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f)
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f)
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f)
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f)
+ }
+ panic("expfmt.extractSamples: unknown metric family type")
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/expfmt/encode.go b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 0000000..11839ed
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+ return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ // Check for protocol buffer
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return FmtProtoDelim
+ case "text":
+ return FmtProtoText
+ case "compact-text":
+ return FmtProtoCompact
+ }
+ }
+ // Check for text format.
+ ver := ac.Params["version"]
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return FmtText
+ }
+ }
+ return FmtText
+}
+
+// NewEncoder returns a new encoder based on content type negotiation.
+func NewEncoder(w io.Writer, format Format) Encoder {
+ switch format {
+ case FmtProtoDelim:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := pbutil.WriteDelimited(w, v)
+ return err
+ })
+ case FmtProtoCompact:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, v.String())
+ return err
+ })
+ case FmtProtoText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ return err
+ })
+ case FmtText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, v)
+ return err
+ })
+ }
+ panic("expfmt.NewEncoder: unknown format")
+}
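+
+// A sketch of how Negotiate and NewEncoder are typically combined in an HTTP
+// handler; serveMetrics and families are illustrative names, not part of this
+// package:
+//
+//	func serveMetrics(w http.ResponseWriter, r *http.Request, families []*dto.MetricFamily) {
+//		format := Negotiate(r.Header)
+//		w.Header().Set(hdrContentType, string(format))
+//		enc := NewEncoder(w, format)
+//		for _, mf := range families {
+//			if err := enc.Encode(mf); err != nil {
+//				http.Error(w, err.Error(), http.StatusInternalServerError)
+//				return
+//			}
+//		}
+//	}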
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/expfmt/expfmt.go b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 0000000..366fbde
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+// Format specifies the HTTP content type of the different wire protocols.
+type Format string
+
+const (
+ TextVersion = "0.0.4"
+
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+ // The Content-Type values for the different wire protocols.
+ FmtUnknown Format = `<unknown>`
+ FmtText Format = `text/plain; version=` + TextVersion
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+
+ // fmtJSON2 is hidden as it is deprecated.
+ fmtJSON2 Format = `application/json; version=0.0.2`
+)
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/expfmt/fuzz.go b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 0000000..dc2eede
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/expfmt/json_decode.go b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/json_decode.go
new file mode 100644
index 0000000..cf86545
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/json_decode.go
@@ -0,0 +1,174 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/common/model"
+)
+
+type json2Decoder struct {
+ dec *json.Decoder
+ fams []*dto.MetricFamily
+}
+
+func newJSON2Decoder(r io.Reader) Decoder {
+ return &json2Decoder{
+ dec: json.NewDecoder(r),
+ }
+}
+
+type histogram002 struct {
+ Labels model.LabelSet `json:"labels"`
+ Values map[string]float64 `json:"value"`
+}
+
+type counter002 struct {
+ Labels model.LabelSet `json:"labels"`
+ Value float64 `json:"value"`
+}
+
+func protoLabelSet(base, ext model.LabelSet) ([]*dto.LabelPair, error) {
+ labels := base.Clone().Merge(ext)
+ delete(labels, model.MetricNameLabel)
+
+ names := make([]string, 0, len(labels))
+ for ln := range labels {
+ names = append(names, string(ln))
+ }
+ sort.Strings(names)
+
+ pairs := make([]*dto.LabelPair, 0, len(labels))
+
+ for _, ln := range names {
+ if !model.LabelNameRE.MatchString(ln) {
+ return nil, fmt.Errorf("invalid label name %q", ln)
+ }
+ lv := labels[model.LabelName(ln)]
+
+ pairs = append(pairs, &dto.LabelPair{
+ Name: proto.String(ln),
+ Value: proto.String(string(lv)),
+ })
+ }
+
+ return pairs, nil
+}
+
+func (d *json2Decoder) more() error {
+ var entities []struct {
+ BaseLabels model.LabelSet `json:"baseLabels"`
+ Docstring string `json:"docstring"`
+ Metric struct {
+ Type string `json:"type"`
+ Values json.RawMessage `json:"value"`
+ } `json:"metric"`
+ }
+
+ if err := d.dec.Decode(&entities); err != nil {
+ return err
+ }
+ for _, e := range entities {
+ f := &dto.MetricFamily{
+ Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])),
+ Help: proto.String(e.Docstring),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{},
+ }
+
+ d.fams = append(d.fams, f)
+
+ switch e.Metric.Type {
+ case "counter", "gauge":
+ var values []counter002
+
+ if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
+ return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
+ }
+
+ for _, ctr := range values {
+ labels, err := protoLabelSet(e.BaseLabels, ctr.Labels)
+ if err != nil {
+ return err
+ }
+ f.Metric = append(f.Metric, &dto.Metric{
+ Label: labels,
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(ctr.Value),
+ },
+ })
+ }
+
+ case "histogram":
+ var values []histogram002
+
+ if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
+ return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
+ }
+
+ for _, hist := range values {
+ quants := make([]string, 0, len(values))
+ for q := range hist.Values {
+ quants = append(quants, q)
+ }
+
+ sort.Strings(quants)
+
+ for _, q := range quants {
+ value := hist.Values[q]
+ // The correct label is "quantile" but, to avoid breaking old expressions,
+ // it remains "percentile" here.
+ hist.Labels["percentile"] = model.LabelValue(q)
+
+ labels, err := protoLabelSet(e.BaseLabels, hist.Labels)
+ if err != nil {
+ return err
+ }
+
+ f.Metric = append(f.Metric, &dto.Metric{
+ Label: labels,
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(value),
+ },
+ })
+ }
+ }
+
+ default:
+ return fmt.Errorf("unknown metric type %q", e.Metric.Type)
+ }
+ }
+ return nil
+}
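+
+// For reference, the legacy 0.0.2 JSON decoded above has roughly this shape
+// (derived from the struct tags; names and values are illustrative):
+//
+//	[{"baseLabels": {"__name__": "rpc_calls_total", "job": "api"},
+//	  "docstring": "RPC calls.",
+//	  "metric": {"type": "counter",
+//	             "value": [{"labels": {"service": "a"}, "value": 17}]}}]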
+
+// Decode implements the Decoder interface.
+func (d *json2Decoder) Decode(v *dto.MetricFamily) error {
+ if len(d.fams) == 0 {
+ if err := d.more(); err != nil {
+ return err
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_create.go b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 0000000..0bb9c14
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,305 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. This function does not perform checks on the
+// content of the metric and label names, i.e. invalid metric or label names
+// will result in invalid text format output.
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+ var written int
+
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err := fmt.Fprintf(
+ out, "# HELP %s %s\n",
+ name, escapeString(*in.Help, false),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ metricType := in.GetType()
+ n, err := fmt.Fprintf(
+ out, "# TYPE %s %s\n",
+ name, strings.ToLower(metricType.String()),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Counter.GetValue(),
+ out,
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Gauge.GetValue(),
+ out,
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Untyped.GetValue(),
+ out,
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ name, metric,
+ model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ q.GetValue(),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Summary.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Summary.GetSampleCount()),
+ out,
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, q := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
+ float64(q.GetCumulativeCount()),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, "+Inf",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Histogram.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
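+
+// For orientation, a counter family with a single labelled metric renders
+// roughly as follows (metric name and values are illustrative):
+//
+//	# HELP http_requests_total Total number of HTTP requests.
+//	# TYPE http_requests_total counter
+//	http_requests_total{code="200"} 1027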
+
+// writeSample writes a single sample in text format to out, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// and value (use empty strings if not required), and the value. The function
+// returns the number of bytes written and any error encountered.
+func writeSample(
+ name string,
+ metric *dto.Metric,
+ additionalLabelName, additionalLabelValue string,
+ value float64,
+ out io.Writer,
+) (int, error) {
+ var written int
+ n, err := fmt.Fprint(out, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = labelPairsToText(
+ metric.Label,
+ additionalLabelName, additionalLabelValue,
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = fmt.Fprintf(out, " %v", value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = out.Write([]byte{'\n'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// labelPairsToText converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into text formatted as required by the
+// text format and writes it to 'out'. An empty slice in combination with an
+// empty string 'additionalLabelName' results in nothing being
+// written. Otherwise, the label pairs are written, escaped as required by the
+// text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered.
+func labelPairsToText(
+ in []*dto.LabelPair,
+ additionalLabelName, additionalLabelValue string,
+ out io.Writer,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var written int
+ separator := '{'
+ for _, lp := range in {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, lp.GetName(), escapeString(lp.GetValue(), true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, additionalLabelName,
+ escapeString(additionalLabelValue, true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err := out.Write([]byte{'}'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+func escapeString(v string, includeDoubleQuote bool) string {
+ result := bytes.NewBuffer(make([]byte, 0, len(v)))
+ for _, c := range v {
+ switch {
+ case c == '\\':
+ result.WriteString(`\\`)
+ case includeDoubleQuote && c == '"':
+ result.WriteString(`\"`)
+ case c == '\n':
+ result.WriteString(`\n`)
+ default:
+ result.WriteRune(c)
+ }
+ }
+ return result.String()
+}
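+
+// A worked example of the escaping rules above (the input contains a real
+// newline and real double quotes):
+//
+//	escapeString("line1\nline2 \"x\"", true) // -> `line1\nline2 \"x\"`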
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_parse.go b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 0000000..bd170b1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,753 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+ // These tell us if the currently processed line ends with '_count' or
+ // '_sum' respectively and belongs to a summary/histogram, representing the
+ // sample count and sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. Similar is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily nor the label
+// pairs within each Metric are sorted. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate Parser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ // If p.err is io.EOF now, we have run into a premature end of the input
+ // stream. Turn this error into something nicer and more
+ // meaningful. (io.EOF is often used as a signal for the legitimate end
+ // of an input stream.)
+ if p.err == io.EOF {
+ p.parseError("unexpected end of input stream")
+ }
+ return p.metricFamiliesByName, p.err
+}
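+
+// A minimal usage sketch, assuming a hand-written exposition snippet; metric
+// and label names are illustrative:
+//
+//	var parser TextParser
+//	families, err := parser.TextToMetricFamilies(strings.NewReader(
+//		"# TYPE rpc_calls_total counter\nrpc_calls_total{service=\"a\"} 17\n"))
+//	if err != nil {
+//		// handle parse error
+//	}
+//	fmt.Println(len(families["rpc_calls_total"].Metric)) // prints 1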
+
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ // Do not append the newly created currentMetric to
+ // currentMF.Metric right now. First wait if this is a summary,
+ // and the metric exists already, which we can only know after
+ // having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+ // Check whether this is the _sum or _count of a summary/histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
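
A minimal usage sketch (editorial, not part of the vendored diff): the state machine above is driven by the parser's public entry point, TextToMetricFamilies, which is defined earlier in text_parse.go and assumed here. It turns text-format exposition into dto.MetricFamily messages keyed by metric name.

    package main

    import (
        "fmt"
        "strings"

        "github.com/prometheus/common/expfmt"
    )

    func main() {
        input := "# TYPE http_requests_total counter\n" +
            "http_requests_total{method=\"get\",code=\"200\"} 1027 1395066363000\n"

        var parser expfmt.TextParser
        // TextToMetricFamilies runs the startOfLine/readingMetricName/... states
        // shown in the diff until the input is exhausted or a parse error occurs.
        families, err := parser.TextToMetricFamilies(strings.NewReader(input))
        if err != nil {
            fmt.Println("parse error:", err)
            return
        }
        for name, mf := range families {
            fmt.Println(name, mf.GetType(), len(mf.GetMetric()))
        }
    }
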
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 0000000..7723656
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+ Q float32
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 0000000..648b38c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+ slice := []Accept(accept)
+ return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+ slice := []Accept(accept)
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+ slice := []Accept(accept)
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+ parts := strings.Split(header, ",")
+ accept = make([]Accept, 0, len(parts))
+ for _, part := range parts {
+ part := strings.Trim(part, " ")
+
+ a := Accept{}
+ a.Params = make(map[string]string)
+ a.Q = 1.0
+
+ mrp := strings.Split(part, ";")
+
+ media_range := mrp[0]
+ sp := strings.Split(media_range, "/")
+ a.Type = strings.Trim(sp[0], " ")
+
+ switch {
+ case len(sp) == 1 && a.Type == "*":
+ a.SubType = "*"
+ case len(sp) == 2:
+ a.SubType = strings.Trim(sp[1], " ")
+ default:
+ continue
+ }
+
+ if len(mrp) == 1 {
+ accept = append(accept, a)
+ continue
+ }
+
+ for _, param := range mrp[1:] {
+ sp := strings.SplitN(param, "=", 2)
+ if len(sp) != 2 {
+ continue
+ }
+ token := strings.Trim(sp[0], " ")
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp[1], 32)
+ } else {
+ a.Params[token] = strings.Trim(sp[1], " ")
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ slice := accept_slice(accept)
+ sort.Sort(slice)
+
+ return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
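
A small editorial sketch of how the negotiation above behaves (the import path matches the vendored location): ParseAccept sorts clauses by descending q and by wildcard specificity, and Negotiate walks the sorted clauses against the server's alternatives.

    package main

    import (
        "fmt"

        "bitbucket.org/ww/goautoneg"
    )

    func main() {
        header := "text/html;q=0.5, application/json, */*;q=0.1"
        alternatives := []string{"application/json", "text/html"}

        // The JSON clause carries the implicit q=1.0 and wins.
        fmt.Println(goautoneg.Negotiate(header, alternatives)) // application/json

        for _, clause := range goautoneg.ParseAccept(header) {
            fmt.Printf("%s/%s q=%g params=%v\n", clause.Type, clause.SubType, clause.Q, clause.Params)
        }
    }
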
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/alert.go b/src/kube2msb/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 0000000..35e739c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,136 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+ // Label value pairs for purpose of aggregation, matching, and disposition
+ // dispatching. This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+ GeneratorURL string `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ if a.Resolved() {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Validate returns an error if the alert data is inconsistent.
+func (a *Alert) Validate() error {
+ if a.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if err := a.Labels.Validate(); err != nil {
+ return fmt.Errorf("invalid label set: %s", err)
+ }
+ if len(a.Labels) == 0 {
+ return fmt.Errorf("at least one label pair required")
+ }
+ if err := a.Annotations.Validate(); err != nil {
+ return fmt.Errorf("invalid annotations: %s", err)
+ }
+ return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+ if as[i].StartsAt.Before(as[j].StartsAt) {
+ return true
+ }
+ if as[i].EndsAt.Before(as[j].EndsAt) {
+ return true
+ }
+ return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns StatusFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+ if as.HasFiring() {
+ return AlertFiring
+ }
+ return AlertResolved
+}
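
An editorial sketch of the Alert API above: a firing alert needs at least a valid label set with a start time, and Status/Resolved are derived from EndsAt. All field values here are made up.

    package main

    import (
        "fmt"
        "time"

        "github.com/prometheus/common/model"
    )

    func main() {
        a := &model.Alert{
            Labels:      model.LabelSet{"alertname": "HighErrorRate", "job": "api"},
            Annotations: model.LabelSet{"summary": "5xx rate above threshold"},
            StartsAt:    time.Now().Add(-10 * time.Minute),
        }
        if err := a.Validate(); err != nil {
            fmt.Println("invalid alert:", err)
            return
        }
        // EndsAt is zero, so the alert is still firing.
        fmt.Println(a.Name(), a.Status(), a.Fingerprint())
    }
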
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/fingerprinting.go b/src/kube2msb/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 0000000..fc4de41
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
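
A short editorial sketch of the set operations above; Fingerprint values are just 64-bit hashes, so small literals stand in for real fingerprints here.

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        a := model.FingerprintSet{1: {}, 2: {}, 3: {}}
        b := model.FingerprintSet{2: {}, 3: {}, 4: {}}

        common := a.Intersection(b)          // contains 2 and 3
        fmt.Println(len(common), a.Equal(b)) // 2 false

        fp, err := model.FingerprintFromString("0123456789abcdef")
        fmt.Println(fp, err) // 0123456789abcdef <nil>
    }
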
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/fnv.go b/src/kube2msb/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 0000000..038fc1c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializies a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
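
The helpers above are unexported, so this editorial sketch re-declares the same constants outside package model to show that they reproduce hash/fnv's 64-bit FNV-1a.

    package main

    import (
        "fmt"
        "hash/fnv"
    )

    const (
        offset64 = 14695981039346656037
        prime64  = 1099511628211
    )

    // hashAdd mirrors the vendored helper: xor each byte, then multiply by the prime.
    func hashAdd(h uint64, s string) uint64 {
        for i := 0; i < len(s); i++ {
            h ^= uint64(s[i])
            h *= prime64
        }
        return h
    }

    func main() {
        inline := hashAdd(offset64, "job")

        std := fnv.New64a()
        std.Write([]byte("job"))

        fmt.Println(inline == std.Sum64()) // true
    }
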
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/labels.go b/src/kube2msb/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 0000000..3b72e7f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,206 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ // AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE.
+func (ln LabelName) IsValid() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelNameRE.MatchString(s) {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelNameRE.MatchString(s) {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+ return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+ return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+ return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+ switch {
+ case l[i].Name > l[j].Name:
+ return false
+ case l[i].Name < l[j].Name:
+ return true
+ case l[i].Value > l[j].Value:
+ return false
+ case l[i].Value < l[j].Value:
+ return true
+ default:
+ return false
+ }
+}
+
+func (l LabelPairs) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
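
An editorial sketch of the label-name helpers above: names must match [a-zA-Z_][a-zA-Z0-9_]*, and LabelNames sorts lexically.

    package main

    import (
        "fmt"
        "sort"

        "github.com/prometheus/common/model"
    )

    func main() {
        names := model.LabelNames{"job", "__name__", "instance"}
        sort.Sort(names)
        fmt.Println(names.String()) // __name__, instance, job

        fmt.Println(model.LabelName("http_code").IsValid()) // true
        fmt.Println(model.LabelName("0bad").IsValid())      // false: leading digit
    }
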
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/labelset.go b/src/kube2msb/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 0000000..5f931cd
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
+// may be fully-qualified down to the point where it may resolve to a single
+// Metric in the data store or not. All operations that occur within the realm
+// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
+// match.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+ for ln, lv := range ls {
+ if !ln.IsValid() {
+ return fmt.Errorf("invalid name %q", ln)
+ }
+ if !lv.IsValid() {
+ return fmt.Errorf("invalid value %q", lv)
+ }
+ }
+ return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares the label sets, using the following criteria:
+//
+// If ls has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in ls, then ls is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If ls and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
+
+func (l LabelSet) String() string {
+ lstrs := make([]string, 0, len(l))
+ for l, v := range l {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+ }
+
+ sort.Strings(lstrs)
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !LabelNameRE.MatchString(string(ln)) {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
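
A short editorial sketch of LabelSet semantics: Merge is non-destructive, with the argument winning on conflicting keys, and Before orders smaller sets first.

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        base := model.LabelSet{"job": "api", "instance": "10.0.0.1:9090"}
        extra := model.LabelSet{"env": "prod", "job": "api-v2"}

        merged := base.Merge(extra) // "job" takes the value from extra
        fmt.Println(merged)

        fmt.Println(base.Before(merged)) // true: base has fewer labels
        fmt.Println(merged.Fingerprint(), merged.FastFingerprint())
    }
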
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/metric.go b/src/kube2msb/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 0000000..a5da59a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,98 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+var (
+ separator = []byte{0}
+ MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := Metric{}
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+func IsValidMetricName(n LabelValue) bool {
+ if len(n) == 0 {
+ return false
+ }
+ for i, b := range n {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
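
An editorial sketch of the Metric type above: __name__ is pulled out by String, the remaining labels are sorted, and metric names may additionally contain colons.

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        m := model.Metric{
            model.MetricNameLabel: "http_requests_total",
            "method":              "get",
            "code":                "200",
        }
        fmt.Println(m) // http_requests_total{code="200", method="get"}
        fmt.Println(m.Fingerprint())

        fmt.Println(model.IsValidMetricName("job:requests:rate5m")) // true
        fmt.Println(model.IsValidMetricName("2xx_total"))           // false: leading digit
    }
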
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/model.go b/src/kube2msb/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 0000000..88f013a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/signature.go b/src/kube2msb/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 0000000..8762b13
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, labelName)
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, labels[labelName])
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(ls[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
+// faster and less allocation-heavy hash function, which is more susceptible to
+// create hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ for labelName, labelValue := range ls {
+ sum := hashNew()
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(labelValue))
+ result ^= sum
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ sum := hashNew()
+ for _, label := range labels {
+ sum = hashAdd(sum, string(label))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[label]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
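
An editorial sketch of the signature helpers above: both the plain-map and the LabelName-filtered variants sort label names before hashing, so signatures are independent of label order.

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        labels := map[string]string{"job": "api", "instance": "10.0.0.1:9090"}
        fmt.Println(model.LabelsToSignature(labels))

        m := model.Metric{"job": "api", "instance": "10.0.0.1:9090", "code": "200"}
        // Only the named labels contribute to the signature.
        fmt.Println(model.SignatureForLabels(m, "job", "instance"))
        // Everything except the excluded labels contributes.
        fmt.Println(model.SignatureWithoutLabels(m, map[model.LabelName]struct{}{"code": {}}))
    }
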
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/silence.go b/src/kube2msb/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 0000000..7538e29
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// Matcher describes how to match the value of a given label.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validate returns an error if any field of the matcher has an invalid value.
+func (m *Matcher) Validate() error {
+ if !m.Name.IsValid() {
+ return fmt.Errorf("invalid name %q", m.Name)
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return fmt.Errorf("invalid regular expression %q", m.Value)
+ }
+ } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+ return fmt.Errorf("invalid value %q", m.Value)
+ }
+ return nil
+}
+
+// Silence defines the representation of a silence definition
+// in the Prometheus eco-system.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
+
+// Validate returns an error if any field of the silence has an invalid value.
+func (s *Silence) Validate() error {
+ if len(s.Matchers) == 0 {
+ return fmt.Errorf("at least one matcher required")
+ }
+ for _, m := range s.Matchers {
+ if err := m.Validate(); err != nil {
+ return fmt.Errorf("invalid matcher: %s", err)
+ }
+ }
+ if s.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if s.EndsAt.IsZero() {
+ return fmt.Errorf("end time missing")
+ }
+ if s.EndsAt.Before(s.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if s.CreatedBy == "" {
+ return fmt.Errorf("creator information missing")
+ }
+ if s.Comment == "" {
+ return fmt.Errorf("comment missing")
+ }
+ if s.CreatedAt.IsZero() {
+ return fmt.Errorf("creation timestamp missing")
+ }
+ return nil
+}
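
An editorial sketch of constructing a Silence that passes Validate; all field values here are made up.

    package main

    import (
        "fmt"
        "time"

        "github.com/prometheus/common/model"
    )

    func main() {
        now := time.Now()
        s := &model.Silence{
            Matchers: []*model.Matcher{
                {Name: "alertname", Value: "HighErrorRate"},
                {Name: "instance", Value: `10\.0\.0\..*`, IsRegex: true},
            },
            StartsAt:  now,
            EndsAt:    now.Add(2 * time.Hour),
            CreatedAt: now,
            CreatedBy: "ops@example.com",
            Comment:   "rolling deploy of api",
        }
        fmt.Println(s.Validate()) // <nil>
    }
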
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/time.go b/src/kube2msb/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 0000000..548968a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,249 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ // MinimumTick is the minimum supported time resolution. It must be at
+ // most time.Second, and divide it evenly, for the code below to work.
+ minimumTick = time.Millisecond
+ // second is the Time duration equivalent to one second.
+ second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+ return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+ return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+ return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+ return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+ return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+ return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+ return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+ return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+ return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+ return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+ return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+ return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ p := strings.Split(string(b), ".")
+ switch len(p) {
+ case 1:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ *t = Time(v * second)
+
+ case 2:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ v *= second
+
+ prec := dotPrecision - len(p[1])
+ if prec < 0 {
+ p[1] = p[1][:dotPrecision]
+ } else if prec > 0 {
+ p[1] = p[1] + strings.Repeat("0", prec)
+ }
+
+ va, err := strconv.ParseInt(p[1], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *t = Time(v + va)
+
+ default:
+ return fmt.Errorf("invalid time %q", string(b))
+ }
+ return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
+// ParseDuration parses a string into a Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
+func ParseDuration(durationStr string) (Duration, error) {
+ matches := durationRE.FindStringSubmatch(durationStr)
+ if len(matches) != 3 {
+ return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+ }
+ var (
+ n, _ = strconv.Atoi(matches[1])
+ dur = time.Duration(n) * time.Millisecond
+ )
+ switch unit := matches[2]; unit {
+ case "y":
+ dur *= 1000 * 60 * 60 * 24 * 365
+ case "w":
+ dur *= 1000 * 60 * 60 * 24 * 7
+ case "d":
+ dur *= 1000 * 60 * 60 * 24
+ case "h":
+ dur *= 1000 * 60 * 60
+ case "m":
+ dur *= 1000 * 60
+ case "s":
+ dur *= 1000
+ case "ms":
+ // Value already correct
+ default:
+ return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+ }
+ return Duration(dur), nil
+}
+
+func (d Duration) String() string {
+ var (
+ ms = int64(time.Duration(d) / time.Millisecond)
+ unit = "ms"
+ )
+ factors := map[string]int64{
+ "y": 1000 * 60 * 60 * 24 * 365,
+ "w": 1000 * 60 * 60 * 24 * 7,
+ "d": 1000 * 60 * 60 * 24,
+ "h": 1000 * 60 * 60,
+ "m": 1000 * 60,
+ "s": 1000,
+ "ms": 1,
+ }
+
+ switch int64(0) {
+ case ms % factors["y"]:
+ unit = "y"
+ case ms % factors["w"]:
+ unit = "w"
+ case ms % factors["d"]:
+ unit = "d"
+ case ms % factors["h"]:
+ unit = "h"
+ case ms % factors["m"]:
+ unit = "m"
+ case ms % factors["s"]:
+ unit = "s"
+ }
+ return fmt.Sprintf("%v%v", ms/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+ return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
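
An editorial sketch of the time helpers above: Time stores milliseconds since the epoch, and ParseDuration/Duration.String round-trip the y/w/d/h/m/s/ms format.

    package main

    import (
        "fmt"
        "time"

        "github.com/prometheus/common/model"
    )

    func main() {
        d, err := model.ParseDuration("90m")
        if err != nil {
            panic(err)
        }
        fmt.Println(time.Duration(d), d) // 1h30m0s 90m

        ts := model.TimeFromUnix(1395066363)
        fmt.Println(ts)                                    // 1395066363
        fmt.Println(ts.Add(30 * time.Second).Time().UTC()) // 30s later as time.Time
    }
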
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/value.go b/src/kube2msb/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 0000000..dbf5d10
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,403 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality are defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+ if s == o {
+ return true
+ }
+
+ if !s.Metric.Equal(o.Metric) {
+ return false
+ }
+ if !s.Timestamp.Equal(o.Timestamp) {
+ return false
+ }
+	if !s.Value.Equal(o.Value) {
+ return false
+ }
+
+ return true
+}
+
+func (s Sample) String() string {
+ return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ })
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ s.Metric = v.Metric
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+
+ return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+ return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+ switch {
+ case s[i].Metric.Before(s[j].Metric):
+ return true
+ case s[j].Metric.Before(s[i].Metric):
+ return false
+ case s[i].Timestamp.Before(s[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+func (s Samples) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, sample := range s {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+ vals := make([]string, len(ss.Values))
+ for i, v := range ss.Values {
+ vals[i] = v.String()
+ }
+ return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "<ValNone>":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return "<ValNone>"
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+ return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+ v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+ return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+ var f string
+ v := [...]interface{}{&s.Timestamp, &f}
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseFloat(f, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing sample value: %s", err)
+ }
+ s.Value = SampleValue(value)
+ return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+ Value string `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s *String) String() string {
+ return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+ return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+ v := [...]interface{}{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Vector is basically only an alias for Samples, but the
+// contract is that in a Vector, all Samples have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+ entries := make([]string, len(vec))
+ for i, s := range vec {
+ entries[i] = s.String()
+ }
+ return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+ switch {
+ case vec[i].Metric.Before(vec[j].Metric):
+ return true
+ case vec[j].Metric.Before(vec[i].Metric):
+ return false
+ case vec[i].Timestamp.Before(vec[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+ if len(vec) != len(o) {
+ return false
+ }
+
+ for i, sample := range vec {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+ matCp := make(Matrix, len(mat))
+ copy(matCp, mat)
+ sort.Sort(matCp)
+
+ strs := make([]string, len(matCp))
+
+ for i, ss := range matCp {
+ strs[i] = ss.String()
+ }
+
+ return strings.Join(strs, "\n")
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/AUTHORS.md b/src/kube2msb/vendor/github.com/prometheus/procfs/AUTHORS.md
new file mode 100644
index 0000000..6eb1935
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/AUTHORS.md
@@ -0,0 +1,11 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Tobias Schmidt <ts@soundcloud.com>
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Tobias Schmidt <ts@soundcloud.com>
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/src/kube2msb/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 0000000..5705f0f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/LICENSE b/src/kube2msb/vendor/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/NOTICE b/src/kube2msb/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 0000000..53c5e9a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/README.md b/src/kube2msb/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 0000000..761d31c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,7 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
+[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
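+
+A minimal usage sketch, mirroring the example in the package documentation of this repository:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/prometheus/procfs"
+)
+
+func main() {
+	// Open the /proc entry for the current process.
+	p, err := procfs.Self()
+	if err != nil {
+		log.Fatalf("could not get process: %s", err)
+	}
+
+	// Read /proc/[pid]/stat for this process.
+	stat, err := p.NewStat()
+	if err != nil {
+		log.Fatalf("could not get process stat: %s", err)
+	}
+
+	fmt.Printf("command: %s, cpu time: %fs\n", stat.Comm, stat.CPUTime())
+}
+```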
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/doc.go b/src/kube2msb/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 0000000..e2acd6d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.NewStat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
+//
+package procfs
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/fs.go b/src/kube2msb/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 0000000..838474a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,36 @@
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "path"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS string
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = "/proc"
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
+
+func (fs FS) stat(p string) (os.FileInfo, error) {
+ return os.Stat(path.Join(string(fs), p))
+}
+
+func (fs FS) open(p string) (*os.File, error) {
+ return os.Open(path.Join(string(fs), p))
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/proc.go b/src/kube2msb/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 0000000..21445cf
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,149 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+
+ fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int { return len(p) }
+func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process.
+func Self() (Proc, error) {
+ return NewProc(os.Getpid())
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+
+ return fs.NewProc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+
+ return fs.AllProcs()
+}
+
+// NewProc returns a process for the given pid.
+func (fs FS) NewProc(pid int) (Proc, error) {
+ if _, err := fs.stat(strconv.Itoa(pid)); err != nil {
+ return Proc{}, err
+ }
+
+ return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+ d, err := fs.open("")
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ p := Procs{}
+ for _, n := range names {
+ pid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ p = append(p, Proc{PID: int(pid), fs: fs})
+ }
+
+ return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+ f, err := p.open("cmdline")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ fds := make([]uintptr, len(names))
+ for i, n := range names {
+ fd, err := strconv.ParseInt(n, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ }
+ fds[i] = uintptr(fd)
+ }
+
+ return fds, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+ fds, err := p.fileDescriptors()
+ if err != nil {
+ return 0, err
+ }
+
+ return len(fds), nil
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+ d, err := p.open("fd")
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ return names, nil
+}
+
+func (p Proc) open(pa string) (*os.File, error) {
+ return p.fs.open(path.Join(strconv.Itoa(p.PID), pa))
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/proc_limits.go b/src/kube2msb/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 0000000..9f080b9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,111 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "regexp"
+ "strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits.
+type ProcLimits struct {
+ CPUTime int
+ FileSize int
+ DataSize int
+ StackSize int
+ CoreFileSize int
+ ResidentSet int
+ Processes int
+ OpenFiles int
+ LockedMemory int
+ AddressSpace int
+ FileLocks int
+ PendingSignals int
+ MsqqueueSize int
+ NicePriority int
+ RealtimePriority int
+ RealtimeTimeout int
+}
+
+const (
+ limitsFields = 3
+ limitsUnlimited = "unlimited"
+)
+
+var (
+ limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+ f, err := p.open("limits")
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ defer f.Close()
+
+ var (
+ l = ProcLimits{}
+ s = bufio.NewScanner(f)
+ )
+ for s.Scan() {
+ fields := limitsDelimiter.Split(s.Text(), limitsFields)
+ if len(fields) != limitsFields {
+ return ProcLimits{}, fmt.Errorf(
+ "couldn't parse %s line %s", f.Name(), s.Text())
+ }
+
+ switch fields[0] {
+ case "Max cpu time":
+ l.CPUTime, err = parseInt(fields[1])
+ case "Max file size":
+			l.FileSize, err = parseInt(fields[1])
+ case "Max data size":
+ l.DataSize, err = parseInt(fields[1])
+ case "Max stack size":
+ l.StackSize, err = parseInt(fields[1])
+ case "Max core file size":
+ l.CoreFileSize, err = parseInt(fields[1])
+ case "Max resident set":
+ l.ResidentSet, err = parseInt(fields[1])
+ case "Max processes":
+ l.Processes, err = parseInt(fields[1])
+ case "Max open files":
+ l.OpenFiles, err = parseInt(fields[1])
+ case "Max locked memory":
+ l.LockedMemory, err = parseInt(fields[1])
+ case "Max address space":
+ l.AddressSpace, err = parseInt(fields[1])
+ case "Max file locks":
+ l.FileLocks, err = parseInt(fields[1])
+ case "Max pending signals":
+ l.PendingSignals, err = parseInt(fields[1])
+ case "Max msgqueue size":
+ l.MsqqueueSize, err = parseInt(fields[1])
+ case "Max nice priority":
+ l.NicePriority, err = parseInt(fields[1])
+ case "Max realtime priority":
+ l.RealtimePriority, err = parseInt(fields[1])
+ case "Max realtime timeout":
+ l.RealtimeTimeout, err = parseInt(fields[1])
+ }
+
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ }
+
+ return l, s.Err()
+}
+
+func parseInt(s string) (int, error) {
+ if s == limitsUnlimited {
+ return -1, nil
+ }
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+ }
+ return int(i), nil
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/proc_stat.go b/src/kube2msb/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 0000000..30a403b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,175 @@
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which
+// required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic.
+// After much research it was determined that USER_HZ is actually hardcoded to
+// 100 on all Go-supported platforms as of the time of this writing. This is
+// why we decided to hardcode it here as well. It is not impossible that there
+// could be systems with exceptions, but they should be very exotic edge cases,
+// and in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+ // The process ID.
+ PID int
+ // The filename of the executable.
+ Comm string
+ // The process state.
+ State string
+ // The PID of the parent of this process.
+ PPID int
+ // The process group ID of the process.
+ PGRP int
+ // The session ID of the process.
+ Session int
+ // The controlling terminal of the process.
+ TTY int
+ // The ID of the foreground process group of the controlling terminal of
+ // the process.
+ TPGID int
+ // The kernel flags word of the process.
+ Flags uint
+ // The number of minor faults the process has made which have not required
+ // loading a memory page from disk.
+ MinFlt uint
+ // The number of minor faults that the process's waited-for children have
+ // made.
+ CMinFlt uint
+ // The number of major faults the process has made which have required
+ // loading a memory page from disk.
+ MajFlt uint
+ // The number of major faults that the process's waited-for children have
+ // made.
+ CMajFlt uint
+ // Amount of time that this process has been scheduled in user mode,
+ // measured in clock ticks.
+ UTime uint
+ // Amount of time that this process has been scheduled in kernel mode,
+ // measured in clock ticks.
+ STime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in user mode, measured in clock ticks.
+ CUTime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in kernel mode, measured in clock ticks.
+ CSTime uint
+ // For processes running a real-time scheduling policy, this is the negated
+ // scheduling priority, minus one.
+ Priority int
+ // The nice value, a value in the range 19 (low priority) to -20 (high
+ // priority).
+ Nice int
+ // Number of threads in this process.
+ NumThreads int
+ // The time the process started after system boot, the value is expressed
+ // in clock ticks.
+ Starttime uint64
+ // Virtual memory size in bytes.
+ VSize int
+ // Resident set size in pages.
+ RSS int
+
+ fs FS
+}
+
+// NewStat returns the current status information of the process.
+func (p Proc) NewStat() (ProcStat, error) {
+ f, err := p.open("stat")
+ if err != nil {
+ return ProcStat{}, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ var (
+ ignore int
+
+ s = ProcStat{PID: p.PID, fs: p.fs}
+ l = bytes.Index(data, []byte("("))
+ r = bytes.LastIndex(data, []byte(")"))
+ )
+
+ if l < 0 || r < 0 {
+ return ProcStat{}, fmt.Errorf(
+ "unexpected format, couldn't extract comm: %s",
+ data,
+ )
+ }
+
+ s.Comm = string(data[l+1 : r])
+ _, err = fmt.Fscan(
+ bytes.NewBuffer(data[r+2:]),
+ &s.State,
+ &s.PPID,
+ &s.PGRP,
+ &s.Session,
+ &s.TTY,
+ &s.TPGID,
+ &s.Flags,
+ &s.MinFlt,
+ &s.CMinFlt,
+ &s.MajFlt,
+ &s.CMajFlt,
+ &s.UTime,
+ &s.STime,
+ &s.CUTime,
+ &s.CSTime,
+ &s.Priority,
+ &s.Nice,
+ &s.NumThreads,
+ &ignore,
+ &s.Starttime,
+ &s.VSize,
+ &s.RSS,
+ )
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() int {
+ return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+ return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process start time in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+ stat, err := s.fs.NewStat()
+ if err != nil {
+ return 0, err
+ }
+ return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+ return float64(s.UTime+s.STime) / userHZ
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/stat.go b/src/kube2msb/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 0000000..26fefb0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+ // Boot time in seconds since the Epoch.
+ BootTime int64
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Stat{}, err
+ }
+
+ return fs.NewStat()
+}
+
+// NewStat returns information about the current kernel/system statistics.
+func (fs FS) NewStat() (Stat, error) {
+ f, err := fs.open("stat")
+ if err != nil {
+ return Stat{}, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ line := s.Text()
+ if !strings.HasPrefix(line, "btime") {
+ continue
+ }
+ fields := strings.Fields(line)
+ if len(fields) != 2 {
+ return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
+ }
+ i, err := strconv.ParseInt(fields[1], 10, 32)
+ if err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+ }
+ return Stat{BootTime: i}, nil
+ }
+ if err := s.Err(); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+ }
+
+ return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/LICENSE b/src/kube2msb/vendor/github.com/spf13/pflag/LICENSE
new file mode 100644
index 0000000..63ed1cf
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Alex Ogier. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/README.md b/src/kube2msb/vendor/github.com/spf13/pflag/README.md
new file mode 100644
index 0000000..e74dd50
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/README.md
@@ -0,0 +1,256 @@
+[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
+
+## Description
+
+pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the [GNU extensions to the POSIX recommendations
+for command-line options][1]. For a more precise description, see the
+"Command-line flag syntax" section below.
+
+[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+pflag is available under the same style of BSD license as the Go language,
+which can be found in the LICENSE file.
+
+## Installation
+
+pflag is available using the standard `go get` command.
+
+Install by running:
+
+ go get github.com/spf13/pflag
+
+Run tests by running:
+
+ go test github.com/spf13/pflag
+
+## Usage
+
+pflag is a drop-in replacement of Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+``` go
+import flag "github.com/spf13/pflag"
+```
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+
+``` go
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+```
+
+If you like, you can bind the flag to a variable using the Var() functions.
+
+``` go
+var flagvar int
+func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+There are helper functions to get values later if you have the FlagSet but
+find it difficult to keep up with all of the flag pointers in your code.
+If you have a pflag.FlagSet with a flag called 'flagname' of type int, you
+can use GetInt() to get the int value. Note that 'flagname' must exist and
+must be an int; GetString("flagname") will fail.
+
+``` go
+i, err := flagset.GetInt("flagname")
+```
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
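+
+As a brief sketch (the "verbose" flag name here is purely illustrative),
+positional arguments can be read alongside the parsed flags:
+
+``` go
+verbose := flag.Bool("verbose", false, "enable verbose output") // illustrative flag
+flag.Parse()
+
+fmt.Println("verbose:", *verbose)
+for i := 0; i < flag.NArg(); i++ {
+	fmt.Println("arg", i, "=", flag.Arg(i))
+}
+```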
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+	flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+
+## Setting no option default values for flags
+
+After you create a flag it is possible to set the pflag.NoOptDefVal for
+the given flag. Doing this changes the meaning of the flag slightly. If
+a flag has a NoOptDefVal and the flag is set on the command line without
+an option the flag will be set to the NoOptDefVal. For example given:
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+flag.Lookup("flagname").NoOptDefVal = "4321"
+```
+
+Would result in something like
+
+| Parsed Arguments | Resulting Value |
+| ------------- | ------------- |
+| --flagname=1357 | ip=1357 |
+| --flagname | ip=4321 |
+| [nothing] | ip=1234 |
+
+## Command line flag syntax
+
+```
+--flag // boolean flags, or flags with no option default values
+--flag x // only on flags without a default value
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags
+or flags with a 'no option default value'.
+
+```
+// boolean or flags where the 'no option default value' is set
+-f
+-f=true
+-abc
+but
+-b true is INVALID
+
+// non-boolean and flags without a 'no option default value'
+-n 1234
+-n=1234
+-n1234
+
+// mixed
+-abcs "hello"
+-absd="hello"
+-abcs1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
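+
+For example, a sketch of a duration flag (the flag name "timeout" is purely
+illustrative, and it assumes the `time` package is imported alongside pflag):
+
+``` go
+timeout := flag.Duration("timeout", 30*time.Second, "how long to wait")
+flag.Parse() // accepts e.g. --timeout=1h30m or --timeout 250ms
+fmt.Println("timeout:", *timeout)
+```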
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+``` go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ from := []string{"-", "_"}
+ to := "."
+ for _, sep := range from {
+ name = strings.Replace(name, sep, to, -1)
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
+
+**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
+
+``` go
+func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ switch name {
+ case "old-flag-name":
+ name = "new-flag-name"
+ break
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
+```
+
+## Deprecating a flag or its shorthand
+It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
+
+**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
+```go
+// deprecate a flag by specifying its name and a usage message
+flags.MarkDeprecated("badflag", "please use --good-flag instead")
+```
+This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
+
+**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
+```go
+// deprecate a flag shorthand by specifying its flag name and a usage message
+flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
+```
+This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
+
+Note that the usage message is essential here, and it should not be empty.
+
+## Hidden flags
+It is possible to mark a flag as hidden, meaning it will still function as normal; however, it will not show up in usage/help text.
+
+**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available.
+```go
+// hide a flag by specifying its name
+flags.MarkHidden("secretFlag")
+```
+
+## More info
+
+You can see the full reference documentation of the pflag package
+[at godoc.org][3], or through go's standard documentation system by
+running `godoc -http=:6060` and browsing to
+[http://localhost:6060/pkg/github.com/ogier/pflag][2] after
+installation.
+
+[2]: http://localhost:6060/pkg/github.com/ogier/pflag
+[3]: http://godoc.org/github.com/ogier/pflag
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/bool.go b/src/kube2msb/vendor/github.com/spf13/pflag/bool.go
new file mode 100644
index 0000000..d272e40
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/bool.go
@@ -0,0 +1,97 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+ Value
+ IsBoolFlag() bool
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+ *p = val
+ return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+ v, err := strconv.ParseBool(s)
+ *b = boolValue(v)
+ return err
+}
+
+func (b *boolValue) Type() string {
+ return "bool"
+}
+
+func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+func boolConv(sval string) (interface{}, error) {
+ return strconv.ParseBool(sval)
+}
+
+// GetBool returns the bool value of a flag with the given name
+func (f *FlagSet) GetBool(name string) (bool, error) {
+ val, err := f.getFlagType(name, "bool", boolConv)
+ if err != nil {
+ return false, err
+ }
+ return val.(bool), nil
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+ f.BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+ BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
+ return f.BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
+ p := new(bool)
+ f.BoolVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+ return BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func BoolP(name, shorthand string, value bool, usage string) *bool {
+ b := CommandLine.BoolP(name, shorthand, value, usage)
+ return b
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/count.go b/src/kube2msb/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 0000000..7b1f142
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,97 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+ *p = val
+ return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ // -1 means that no specific value was passed, so increment
+ if v == -1 {
+ *i = countValue(*i + 1)
+ } else {
+ *i = countValue(v)
+ }
+ return err
+}
+
+func (i *countValue) Type() string {
+ return "count"
+}
+
+func (i *countValue) String() string { return fmt.Sprintf("%v", *i) }
+
+func countConv(sval string) (interface{}, error) {
+ i, err := strconv.Atoi(sval)
+ if err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+// GetCount returns the int value of a flag with the given name
+func (f *FlagSet) GetCount(name string) (int, error) {
+ val, err := f.getFlagType(name, "count", countConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+ f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+ flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+ flag.NoOptDefVal = "-1"
+}
+
+// CountVar is like CountVar, but the flag is placed on the CommandLine instead of a given flag set
+func CountVar(p *int, name string, usage string) {
+ CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+ CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) Count(name string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, "", usage)
+ return p
+}
+
+// CountP is like Count only takes a shorthand for the flag name.
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, shorthand, usage)
+ return p
+}
+
+// Count is like Count, but the flag is placed on the CommandLine instead of a given flag set
+func Count(name string, usage string) *int {
+ return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count only takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int {
+ return CommandLine.CountP(name, shorthand, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/duration.go b/src/kube2msb/vendor/github.com/spf13/pflag/duration.go
new file mode 100644
index 0000000..e9debef
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+ "time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+ *p = val
+ return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+ v, err := time.ParseDuration(s)
+ *d = durationValue(v)
+ return err
+}
+
+func (d *durationValue) Type() string {
+ return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+ return time.ParseDuration(sval)
+}
+
+// GetDuration returns the duration value of a flag with the given name
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+ val, err := f.getFlagType(name, "duration", durationConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, "", value, usage)
+ return p
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, "", value, usage)
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, shorthand, value, usage)
+}
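+
+// A minimal usage sketch (illustrative only; the flag name "timeout" is
+// hypothetical): define a duration flag, parse, then read it back through the
+// returned pointer or via GetDuration.
+//
+//	timeout := Duration("timeout", 5*time.Second, "request timeout")
+//	Parse()
+//	d, _ := CommandLine.GetDuration("timeout") // d == *timeout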
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/flag.go b/src/kube2msb/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 0000000..fd91440
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,836 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the GNU extensions to the POSIX recommendations
+for command-line options. See
+http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+Usage:
+
+pflag is a drop-in replacement for Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+ import flag "github.com/ogier/pflag"
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+ var ip = flag.Int("flagname", 1234, "help message for flagname")
+If you like, you can bind the flag to a variable using the Var() functions.
+ var flagvar int
+ func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+ }
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+ flag.Var(&flagVal, "name", "help message for flagname")
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+ flag.Parse()
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+ fmt.Println("ip has value ", *ip)
+ fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+ var ip = flag.IntP("flagname", "f", 1234, "help message")
+ var flagvar bool
+ func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+ }
+ flag.VarP(&flagVal, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+ --flag // boolean flags only
+ --flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+ // boolean flags
+ -f
+ -abc
+ // non-boolean flags
+ -n 1234
+ -Ifile
+ // mixed
+ -abcs "hello"
+ -abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+ // ContinueOnError will return an err from Parse() if an error is found
+ ContinueOnError ErrorHandling = iota
+ // ExitOnError will call os.Exit(2) if an error is found when parsing
+ ExitOnError
+ // PanicOnError will panic() if an error is found when parsing flags
+ PanicOnError
+)
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+ // Usage is the function called when an error occurs while parsing flags.
+ // The field is a function (not a method) that may be changed to point to
+ // a custom error handler.
+ Usage func()
+
+ name string
+ parsed bool
+ actual map[NormalizedName]*Flag
+ formal map[NormalizedName]*Flag
+ shorthands map[byte]*Flag
+ args []string // arguments after flags
+ argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
+ exitOnError bool // does the program exit if there's an error?
+ errorHandling ErrorHandling
+ output io.Writer // nil means stderr; use out() accessor
+ interspersed bool // allow interspersed option/non-option args
+ normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+ Name string // name as it appears on command line
+ Shorthand string // one-letter abbreviated flag
+ Usage string // help message
+ Value Value // value as set
+ DefValue string // default value (as text); for usage message
+ Changed bool // set to true when the user sets the value (rather than leaving the default)
+ NoOptDefVal string // default value (as text); used if the flag is on the command line without any value
+ Deprecated string // If this flag is deprecated, this string describes what to use instead
+ Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text
+ ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string describes what to use instead
+ Annotations map[string][]string // used by cobra.Command bash autocompletion code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+ String() string
+ Set(string) error
+ Type() string
+}
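+
+// A sketch of a user-defined Value (the type and its accepted values are
+// hypothetical): an enum-like flag that only accepts "json" or "text".
+// Set rejects anything else, and Type names the flag type in usage output.
+//
+//	type formatValue string
+//
+//	func (f *formatValue) String() string { return string(*f) }
+//	func (f *formatValue) Set(s string) error {
+//		if s != "json" && s != "text" {
+//			return fmt.Errorf("unsupported format %q", s)
+//		}
+//		*f = formatValue(s)
+//		return nil
+//	}
+//	func (f *formatValue) Type() string { return "format" }
+//
+// Such a value is registered with Var/VarP like any built-in flag type.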
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+ list := make(sort.StringSlice, len(flags))
+ i := 0
+ for k := range flags {
+ list[i] = string(k)
+ i++
+ }
+ list.Sort()
+ result := make([]*Flag, len(list))
+ for i, name := range list {
+ result[i] = flags[NormalizedName(name)]
+ }
+ return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated, and any later lookup of a flag
+// name goes through the same translation. So it would be possible to create
+// a flag named "getURL" and have it translated to "geturl". A user could then
+// pass "--getUrl" which would also be translated to "geturl" and everything will work.
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+ f.normalizeNameFunc = n
+ for k, v := range f.formal {
+ delete(f.formal, k)
+ nname := f.normalizeFlagName(string(k))
+ f.formal[nname] = v
+ v.Name = string(nname)
+ }
+}
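+
+// A sketch of a normalization function (word-separator normalization; the
+// function name is hypothetical): it makes "my_flag", "my.flag" and "my-flag"
+// all resolve to the same flag.
+//
+//	func wordSepNormalize(f *FlagSet, name string) NormalizedName {
+//		return NormalizedName(strings.NewReplacer("_", "-", ".", "-").Replace(name))
+//	}
+//	// fs.SetNormalizeFunc(wordSepNormalize)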
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function
+// that performs no translation if none has been set.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
+ if f.normalizeNameFunc != nil {
+ return f.normalizeNameFunc
+ }
+ return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
+}
+
+func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
+ n := f.GetNormalizeFunc()
+ return n(f, name)
+}
+
+func (f *FlagSet) out() io.Writer {
+ if f.output == nil {
+ return os.Stderr
+ }
+ return f.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+ f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+ for _, flag := range sortFlags(f.formal) {
+ fn(flag)
+ }
+}
+
+// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
+func (f *FlagSet) HasFlags() bool {
+ return len(f.formal) > 0
+}
+
+// VisitAll visits the command-line flags in lexicographical order, calling
+// fn for each. It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+ CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+ for _, flag := range sortFlags(f.actual) {
+ fn(flag)
+ }
+}
+
+// Visit visits the command-line flags in lexicographical order, calling fn
+// for each. It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+ CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+ return f.lookup(f.normalizeFlagName(name))
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+ return f.formal[name]
+}
+
+// getFlagType looks up the named flag and converts its current value to the requested type.
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+ flag := f.Lookup(name)
+ if flag == nil {
+ err := fmt.Errorf("flag accessed but not defined: %s", name)
+ return nil, err
+ }
+
+ if flag.Value.Type() != ftype {
+ err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+ return nil, err
+ }
+
+ sval := flag.Value.String()
+ result, err := convFunc(sval)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+ return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if len(usageMessage) == 0 {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.Deprecated = usageMessage
+ return nil
+}
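+
+// Usage sketch (flag names are hypothetical): a deprecated flag keeps working
+// but disappears from usage output, and setting it prints the message on stderr.
+//
+//	fs.String("old-name", "", "replaced by --new-name")
+//	_ = fs.MarkDeprecated("old-name", "use --new-name instead")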
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if len(usageMessage) == 0 {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.ShorthandDeprecated = usageMessage
+ return nil
+}
+
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ flag.Hidden = true
+ return nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+ return CommandLine.Lookup(name)
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ err := flag.Value.Set(value)
+ if err != nil {
+ return err
+ }
+ if f.actual == nil {
+ f.actual = make(map[NormalizedName]*Flag)
+ }
+ f.actual[normalName] = flag
+ flag.Changed = true
+ if len(flag.Deprecated) > 0 {
+ fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+ }
+ return nil
+}
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[key] = values
+ return nil
+}
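+
+// Usage sketch (flag and key names are hypothetical): annotations are plain
+// key/value metadata attached to a flag; cobra's bash completion code is one
+// consumer of them.
+//
+//	fs.String("output", "", "write result to file")
+//	_ = fs.SetAnnotation("output", "my_completion_hint", []string{"json"})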
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise
+func (f *FlagSet) Changed(name string) bool {
+ flag := f.Lookup(name)
+ // If a flag doesn't exist, it wasn't changed....
+ if flag == nil {
+ return false
+ }
+ return flag.Changed
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+ return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+ usages := f.FlagUsages()
+ fmt.Fprintf(f.out(), "%s", usages)
+}
+
+// FlagUsages returns a string containing the usage information for all flags in
+// the FlagSet
+func (f *FlagSet) FlagUsages() string {
+ x := new(bytes.Buffer)
+
+ f.VisitAll(func(flag *Flag) {
+ if len(flag.Deprecated) > 0 || flag.Hidden {
+ return
+ }
+ format := ""
+ if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
+ format = " -%s, --%s"
+ } else {
+ format = " %s --%s"
+ }
+ if len(flag.NoOptDefVal) > 0 {
+ format = format + "["
+ }
+ if flag.Value.Type() == "string" {
+ // put quotes on the value
+ format = format + "=%q"
+ } else {
+ format = format + "=%s"
+ }
+ if len(flag.NoOptDefVal) > 0 {
+ format = format + "]"
+ }
+ format = format + ": %s\n"
+ shorthand := flag.Shorthand
+ if len(flag.ShorthandDeprecated) > 0 {
+ shorthand = ""
+ }
+ fmt.Fprintf(x, format, shorthand, flag.Name, flag.DefValue, flag.Usage)
+ })
+
+ return x.String()
+}
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+ CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+ fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
+ f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+var Usage = func() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ PrintDefaults()
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+ if i < 0 || i >= len(f.args) {
+ return ""
+ }
+ return f.args[i]
+}
+
+// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+ return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+ f.VarP(value, name, "", usage)
+}
+
+// VarPF is like VarP, but returns the flag created
+func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: name,
+ Shorthand: shorthand,
+ Usage: usage,
+ Value: value,
+ DefValue: value.String(),
+ }
+ f.AddFlag(flag)
+ return flag
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
+ _ = f.VarPF(value, name, shorthand, usage)
+}
+
+// AddFlag will add the flag to the FlagSet
+func (f *FlagSet) AddFlag(flag *Flag) {
+ // Call normalizeFlagName function only once
+ normalizedFlagName := f.normalizeFlagName(flag.Name)
+
+ _, alreadythere := f.formal[normalizedFlagName]
+ if alreadythere {
+ msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
+ fmt.Fprintln(f.out(), msg)
+ panic(msg) // Happens only if flags are declared with identical names
+ }
+ if f.formal == nil {
+ f.formal = make(map[NormalizedName]*Flag)
+ }
+
+ flag.Name = string(normalizedFlagName)
+ f.formal[normalizedFlagName] = flag
+
+ if len(flag.Shorthand) == 0 {
+ return
+ }
+ if len(flag.Shorthand) > 1 {
+ fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand)
+ panic("shorthand is more than one character")
+ }
+ if f.shorthands == nil {
+ f.shorthands = make(map[byte]*Flag)
+ }
+ c := flag.Shorthand[0]
+ old, alreadythere := f.shorthands[c]
+ if alreadythere {
+ fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name)
+ panic("shorthand redefinition")
+ }
+ f.shorthands[c] = flag
+}
+
+// AddFlagSet adds one FlagSet to another. If a flag is already present in f,
+// the flag from newSet will be ignored.
+func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(flag *Flag) {
+ if f.Lookup(flag.Name) == nil {
+ f.AddFlag(flag)
+ }
+ })
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+ CommandLine.VarP(value, name, "", usage)
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func VarP(value Value, name, shorthand, usage string) {
+ CommandLine.VarP(value, name, shorthand, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) error {
+ err := fmt.Errorf(format, a...)
+ fmt.Fprintln(f.out(), err)
+ f.usage()
+ return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (f *FlagSet) usage() {
+ if f == CommandLine {
+ Usage()
+ } else if f.Usage == nil {
+ defaultUsage(f)
+ } else {
+ f.Usage()
+ }
+}
+
+func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error {
+ if err := flag.Value.Set(value); err != nil {
+ return f.failf("invalid argument %q for %s: %v", value, origArg, err)
+ }
+ // mark as visited for Visit()
+ if f.actual == nil {
+ f.actual = make(map[NormalizedName]*Flag)
+ }
+ f.actual[f.normalizeFlagName(flag.Name)] = flag
+ flag.Changed = true
+ if len(flag.Deprecated) > 0 {
+ fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+ }
+ if len(flag.ShorthandDeprecated) > 0 && containsShorthand(origArg, flag.Shorthand) {
+ fmt.Fprintf(os.Stderr, "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
+ }
+ return nil
+}
+
+func containsShorthand(arg, shorthand string) bool {
+ // filter out flags --<flag_name>
+ if strings.HasPrefix(arg, "-") {
+ return false
+ }
+ arg = strings.SplitN(arg, "=", 2)[0]
+ return strings.Contains(arg, shorthand)
+}
+
+func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) {
+ a = args
+ name := s[2:]
+ if len(name) == 0 || name[0] == '-' || name[0] == '=' {
+ err = f.failf("bad flag syntax: %s", s)
+ return
+ }
+ split := strings.SplitN(name, "=", 2)
+ name = split[0]
+ flag, alreadythere := f.formal[f.normalizeFlagName(name)]
+ if !alreadythere {
+ if name == "help" { // special case for nice help message.
+ f.usage()
+ return a, ErrHelp
+ }
+ err = f.failf("unknown flag: --%s", name)
+ return
+ }
+ var value string
+ if len(split) == 2 {
+ // '--flag=arg'
+ value = split[1]
+ } else if len(flag.NoOptDefVal) > 0 {
+ // '--flag' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(a) > 0 {
+ // '--flag arg'
+ value = a[0]
+ a = a[1:]
+ } else {
+ // '--flag' (arg was required)
+ err = f.failf("flag needs an argument: %s", s)
+ return
+ }
+ err = f.setFlag(flag, value, s)
+ return
+}
+
+func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) {
+ outArgs = args
+ outShorts = shorthands[1:]
+ c := shorthands[0]
+
+ flag, alreadythere := f.shorthands[c]
+ if !alreadythere {
+ if c == 'h' { // special case for nice help message.
+ f.usage()
+ err = ErrHelp
+ return
+ }
+ //TODO continue on error
+ err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
+ return
+ }
+ var value string
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ value = shorthands[2:]
+ outShorts = ""
+ } else if len(flag.NoOptDefVal) > 0 {
+ value = flag.NoOptDefVal
+ } else if len(shorthands) > 1 {
+ value = shorthands[1:]
+ outShorts = ""
+ } else if len(args) > 0 {
+ value = args[0]
+ outArgs = args[1:]
+ } else {
+ err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
+ return
+ }
+ err = f.setFlag(flag, value, shorthands)
+ return
+}
+
+func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) {
+ a = args
+ shorthands := s[1:]
+
+ for len(shorthands) > 0 {
+ shorthands, a, err = f.parseSingleShortArg(shorthands, args)
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func (f *FlagSet) parseArgs(args []string) (err error) {
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ if len(s) == 0 || s[0] != '-' || len(s) == 1 {
+ if !f.interspersed {
+ f.args = append(f.args, s)
+ f.args = append(f.args, args...)
+ return nil
+ }
+ f.args = append(f.args, s)
+ continue
+ }
+
+ if s[1] == '-' {
+ if len(s) == 2 { // "--" terminates the flags
+ f.argsLenAtDash = len(f.args)
+ f.args = append(f.args, args...)
+ break
+ }
+ args, err = f.parseLongArg(s, args)
+ } else {
+ args, err = f.parseShortArg(s, args)
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name. Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (f *FlagSet) Parse(arguments []string) error {
+ f.parsed = true
+ f.args = make([]string, 0, len(arguments))
+ err := f.parseArgs(arguments)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
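+
+// A minimal sketch of a standalone FlagSet (names are hypothetical), as used
+// for subcommands: with ContinueOnError, parse errors are returned to the
+// caller instead of exiting the program.
+//
+//	fs := NewFlagSet("serve", ContinueOnError)
+//	port := fs.Int("port", 8080, "listen port")
+//	if err := fs.Parse(os.Args[2:]); err != nil {
+//		// err is ErrHelp when --help or -h was requested
+//	}
+//	_ = *port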
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+ return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.Parse(os.Args[1:])
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func SetInterspersed(interspersed bool) {
+ CommandLine.SetInterspersed(interspersed)
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+ return CommandLine.Parsed()
+}
+
+// The default set of command-line flags, parsed from os.Args.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name and
+// error handling property.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+ f := &FlagSet{
+ name: name,
+ errorHandling: errorHandling,
+ argsLenAtDash: -1,
+ interspersed: true,
+ }
+ return f
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func (f *FlagSet) SetInterspersed(interspersed bool) {
+ f.interspersed = interspersed
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+ f.name = name
+ f.errorHandling = errorHandling
+ f.argsLenAtDash = -1
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/float32.go b/src/kube2msb/vendor/github.com/spf13/pflag/float32.go
new file mode 100644
index 0000000..7683fae
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/float32.go
@@ -0,0 +1,91 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- float32 Value
+type float32Value float32
+
+func newFloat32Value(val float32, p *float32) *float32Value {
+ *p = val
+ return (*float32Value)(p)
+}
+
+func (f *float32Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 32)
+ *f = float32Value(v)
+ return err
+}
+
+func (f *float32Value) Type() string {
+ return "float32"
+}
+
+func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) }
+
+func float32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseFloat(sval, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(v), nil
+}
+
+// GetFloat32 returns the float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32(name string) (float32, error) {
+ val, err := f.getFlagType(name, "float32", float32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float32), nil
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func Float32Var(p *float32, name string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func Float32(name string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, "", value, usage)
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func Float32P(name, shorthand string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/float64.go b/src/kube2msb/vendor/github.com/spf13/pflag/float64.go
new file mode 100644
index 0000000..50fbf8c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/float64.go
@@ -0,0 +1,87 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+ *p = val
+ return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ *f = float64Value(v)
+ return err
+}
+
+func (f *float64Value) Type() string {
+ return "float64"
+}
+
+func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) }
+
+func float64Conv(sval string) (interface{}, error) {
+ return strconv.ParseFloat(sval, 64)
+}
+
+// GetFloat64 returns the float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64(name string) (float64, error) {
+ val, err := f.getFlagType(name, "float64", float64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float64), nil
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, name string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, "", value, usage)
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func Float64P(name, shorthand string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/golangflag.go b/src/kube2msb/vendor/github.com/spf13/pflag/golangflag.go
new file mode 100644
index 0000000..a8c24ef
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/golangflag.go
@@ -0,0 +1,97 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ goflag "flag"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var _ = fmt.Print
+
+// flagValueWrapper implements pflag.Value around a flag.Value. The main
+// difference here is the addition of the Type method that returns a string
+// name of the type. As this is generally unknown, we approximate that with
+// reflection.
+type flagValueWrapper struct {
+ inner goflag.Value
+ flagType string
+}
+
+// We are just copying the boolFlag interface out of goflag as that is what
+// they use to decide if a flag should get "true" when no arg is given.
+type goBoolFlag interface {
+ goflag.Value
+ IsBoolFlag() bool
+}
+
+func wrapFlagValue(v goflag.Value) Value {
+ // If the flag.Value happens to also be a pflag.Value, just use it directly.
+ if pv, ok := v.(Value); ok {
+ return pv
+ }
+
+ pv := &flagValueWrapper{
+ inner: v,
+ }
+
+ t := reflect.TypeOf(v)
+ if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ pv.flagType = strings.TrimSuffix(t.Name(), "Value")
+ return pv
+}
+
+func (v *flagValueWrapper) String() string {
+ return v.inner.String()
+}
+
+func (v *flagValueWrapper) Set(s string) error {
+ return v.inner.Set(s)
+}
+
+func (v *flagValueWrapper) Type() string {
+ return v.flagType
+}
+
+// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag
+func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: goflag.Name,
+ Usage: goflag.Usage,
+ Value: wrapFlagValue(goflag.Value),
+ // Looks like golang flags don't set DefValue correctly :-(
+ //DefValue: goflag.DefValue,
+ DefValue: goflag.Value.String(),
+ }
+ if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
+ flag.NoOptDefVal = "true"
+ }
+ return flag
+}
+
+// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
+func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
+ if f.Lookup(goflag.Name) != nil {
+ return
+ }
+ newflag := PFlagFromGoFlag(goflag)
+ f.AddFlag(newflag)
+}
+
+// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
+func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(goflag *goflag.Flag) {
+ f.AddGoFlag(goflag)
+ })
+}
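+
+// Typical usage sketch (from a main package): pull flags registered on the
+// standard library's flag.CommandLine, for example by libraries that define
+// flags in init(), into pflag before parsing.
+//
+//	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+//	pflag.Parse()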
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/int.go b/src/kube2msb/vendor/github.com/spf13/pflag/int.go
new file mode 100644
index 0000000..b656036
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/int.go
@@ -0,0 +1,87 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+ *p = val
+ return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = intValue(v)
+ return err
+}
+
+func (i *intValue) Type() string {
+ return "int"
+}
+
+func (i *intValue) String() string { return fmt.Sprintf("%v", *i) }
+
+func intConv(sval string) (interface{}, error) {
+ return strconv.Atoi(sval)
+}
+
+// GetInt returns the int value of a flag with the given name
+func (f *FlagSet) GetInt(name string) (int, error) {
+ val, err := f.getFlagType(name, "int", intConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func IntVarP(p *int, name, shorthand string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (f *FlagSet) Int(name string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, "", value, usage)
+ return p
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+ return CommandLine.IntP(name, "", value, usage)
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func IntP(name, shorthand string, value int, usage string) *int {
+ return CommandLine.IntP(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/int32.go b/src/kube2msb/vendor/github.com/spf13/pflag/int32.go
new file mode 100644
index 0000000..41659a9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/int32.go
@@ -0,0 +1,91 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- int32 Value
+type int32Value int32
+
+func newInt32Value(val int32, p *int32) *int32Value {
+ *p = val
+ return (*int32Value)(p)
+}
+
+func (i *int32Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 32)
+ *i = int32Value(v)
+ return err
+}
+
+func (i *int32Value) Type() string {
+ return "int32"
+}
+
+func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) }
+
+func int32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(v), nil
+}
+
+// GetInt32 returns the int32 value of a flag with the given name
+func (f *FlagSet) GetInt32(name string) (int32, error) {
+ val, err := f.getFlagType(name, "int32", int32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int32), nil
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func Int32Var(p *int32, name string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func Int32(name string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, "", value, usage)
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func Int32P(name, shorthand string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/int64.go b/src/kube2msb/vendor/github.com/spf13/pflag/int64.go
new file mode 100644
index 0000000..6e67e38
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/int64.go
@@ -0,0 +1,87 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+ *p = val
+ return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = int64Value(v)
+ return err
+}
+
+func (i *int64Value) Type() string {
+ return "int64"
+}
+
+func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) }
+
+func int64Conv(sval string) (interface{}, error) {
+ return strconv.ParseInt(sval, 0, 64)
+}
+
+// GetInt64 returns the int64 value of a flag with the given name
+func (f *FlagSet) GetInt64(name string) (int64, error) {
+ val, err := f.getFlagType(name, "int64", int64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int64), nil
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, name string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, "", value, usage)
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func Int64P(name, shorthand string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/int8.go b/src/kube2msb/vendor/github.com/spf13/pflag/int8.go
new file mode 100644
index 0000000..400db21
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/int8.go
@@ -0,0 +1,91 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- int8 Value
+type int8Value int8
+
+func newInt8Value(val int8, p *int8) *int8Value {
+ *p = val
+ return (*int8Value)(p)
+}
+
+func (i *int8Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 8)
+ *i = int8Value(v)
+ return err
+}
+
+func (i *int8Value) Type() string {
+ return "int8"
+}
+
+func (i *int8Value) String() string { return fmt.Sprintf("%v", *i) }
+
+func int8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return int8(v), nil
+}
+
+// GetInt8 returns the int8 value of a flag with the given name
+func (f *FlagSet) GetInt8(name string) (int8, error) {
+ val, err := f.getFlagType(name, "int8", int8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int8), nil
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func Int8Var(p *int8, name string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func Int8(name string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, "", value, usage)
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func Int8P(name, shorthand string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/int_slice.go b/src/kube2msb/vendor/github.com/spf13/pflag/int_slice.go
new file mode 100644
index 0000000..1e7c9ed
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/int_slice.go
@@ -0,0 +1,128 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- intSlice Value
+type intSliceValue struct {
+ value *[]int
+ changed bool
+}
+
+func newIntSliceValue(val []int, p *[]int) *intSliceValue {
+ isv := new(intSliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *intSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *intSliceValue) Type() string {
+ return "intSlice"
+}
+
+func (s *intSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func intSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetIntSlice returns the []int value of a flag with the given name
+func (f *FlagSet) GetIntSlice(name string) ([]int, error) {
+ val, err := f.getFlagType(name, "intSlice", intSliceConv)
+ if err != nil {
+ return []int{}, err
+ }
+ return val.([]int), nil
+}
+
+// IntSliceVar defines an intSlice flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSliceVar defines an []int flag with specified name, default value, and usage string.
+// The argument p points to an []int variable in which to store the value of the flag.
+func IntSliceVar(p *[]int, name string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func IntSlice(name string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, "", value, usage)
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, shorthand, value, usage)
+}
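+
+// Usage sketch (flag name hypothetical): values are comma-separated, and
+// repeating the flag appends further values, per Set above.
+//
+//	ids := IntSlice("ids", []int{}, "numeric ids")
+//	// --ids=1,2 --ids=3  =>  *ids == []int{1, 2, 3}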
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/ip.go b/src/kube2msb/vendor/github.com/spf13/pflag/ip.go
new file mode 100644
index 0000000..88a1743
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/ip.go
@@ -0,0 +1,96 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+var _ = strings.TrimSpace
+
+// -- net.IP value
+type ipValue net.IP
+
+func newIPValue(val net.IP, p *net.IP) *ipValue {
+ *p = val
+ return (*ipValue)(p)
+}
+
+func (i *ipValue) String() string { return net.IP(*i).String() }
+func (i *ipValue) Set(s string) error {
+ ip := net.ParseIP(strings.TrimSpace(s))
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP: %q", s)
+ }
+ *i = ipValue(ip)
+ return nil
+}
+
+func (i *ipValue) Type() string {
+ return "ip"
+}
+
+func ipConv(sval string) (interface{}, error) {
+ ip := net.ParseIP(sval)
+ if ip != nil {
+ return ip, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+}
+
+// GetIP returns the net.IP value of a flag with the given name
+func (f *FlagSet) GetIP(name string) (net.IP, error) {
+ val, err := f.getFlagType(name, "ip", ipConv)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IP), nil
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func IPVar(p *net.IP, name string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func IP(name string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, "", value, usage)
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, shorthand, value, usage)
+}
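For readers skimming the vendored pflag sources, the sketch below shows how the IP flag helpers above might be used. It is illustrative only: the flag-set name, flag names, and default address are hypothetical and not part of the vendored code.

```go
package main

import (
	"fmt"
	"net"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	// --addr / -a with a hypothetical default of 127.0.0.1.
	addr := fs.IPP("addr", "a", net.ParseIP("127.0.0.1"), "address to bind")
	if err := fs.Parse([]string{"--addr", "10.0.0.7"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(*addr) // 10.0.0.7
	// GetIP looks the value up by flag name and checks the flag's type.
	if ip, err := fs.GetIP("addr"); err == nil {
		fmt.Println(ip) // 10.0.0.7
	}
}
```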
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/ipmask.go b/src/kube2msb/vendor/github.com/spf13/pflag/ipmask.go
new file mode 100644
index 0000000..5bd44bd
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/ipmask.go
@@ -0,0 +1,122 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+)
+
+// -- net.IPMask value
+type ipMaskValue net.IPMask
+
+func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
+ *p = val
+ return (*ipMaskValue)(p)
+}
+
+func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
+func (i *ipMaskValue) Set(s string) error {
+ ip := ParseIPv4Mask(s)
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP mask: %q", s)
+ }
+ *i = ipMaskValue(ip)
+ return nil
+}
+
+func (i *ipMaskValue) Type() string {
+ return "ipMask"
+}
+
+// ParseIPv4Mask parses an IPv4 netmask written in IP form (e.g. 255.255.255.0).
+// This function should really belong to the net package.
+func ParseIPv4Mask(s string) net.IPMask {
+ mask := net.ParseIP(s)
+ if mask == nil {
+ if len(s) != 8 {
+ return nil
+ }
+ // net.IPMask.String() actually outputs things like ffffff00
+ // so write a horrible parser for that as well :-(
+ m := []int{}
+ for i := 0; i < 4; i++ {
+ b := "0x" + s[2*i:2*i+2]
+ d, err := strconv.ParseInt(b, 0, 0)
+ if err != nil {
+ return nil
+ }
+ m = append(m, int(d))
+ }
+ s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])
+ mask = net.ParseIP(s)
+ if mask == nil {
+ return nil
+ }
+ }
+ return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
+}
+
+func parseIPv4Mask(sval string) (interface{}, error) {
+ mask := ParseIPv4Mask(sval)
+ if mask == nil {
+ return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval)
+ }
+ return mask, nil
+}
+
+// GetIPv4Mask returns the net.IPMask value of a flag with the given name
+func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) {
+ val, err := f.getFlagType(name, "ipMask", parseIPv4Mask)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IPMask), nil
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, "", value, usage)
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, shorthand, value, usage)
+}
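As a small illustration of the parser above, ParseIPv4Mask accepts both the dotted-quad spelling and the hex spelling produced by net.IPMask.String(); the values below are examples only.

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	m1 := pflag.ParseIPv4Mask("255.255.255.0") // dotted-quad form
	m2 := pflag.ParseIPv4Mask("ffffff00")      // net.IPMask.String() hex form
	fmt.Println(m1.String(), m2.String())      // ffffff00 ffffff00
	fmt.Println(pflag.ParseIPv4Mask("bogus"))  // <nil> on parse failure
}
```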
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/ipnet.go b/src/kube2msb/vendor/github.com/spf13/pflag/ipnet.go
new file mode 100644
index 0000000..149b764
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/ipnet.go
@@ -0,0 +1,100 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// IPNet adapts net.IPNet for use as a flag.
+type ipNetValue net.IPNet
+
+func (ipnet ipNetValue) String() string {
+ n := net.IPNet(ipnet)
+ return n.String()
+}
+
+func (ipnet *ipNetValue) Set(value string) error {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(value))
+ if err != nil {
+ return err
+ }
+ *ipnet = ipNetValue(*n)
+ return nil
+}
+
+func (*ipNetValue) Type() string {
+ return "ipNet"
+}
+
+var _ = strings.TrimSpace
+
+func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
+ *p = val
+ return (*ipNetValue)(p)
+}
+
+func ipNetConv(sval string) (interface{}, error) {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+ if err == nil {
+ return *n, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
+}
+
+// GetIPNet returns the net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
+ val, err := f.getFlagType(name, "ipNet", ipNetConv)
+ if err != nil {
+ return net.IPNet{}, err
+ }
+ return val.(net.IPNet), nil
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, "", value, usage)
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/string.go b/src/kube2msb/vendor/github.com/spf13/pflag/string.go
new file mode 100644
index 0000000..e296136
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/string.go
@@ -0,0 +1,82 @@
+package pflag
+
+import "fmt"
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+ *p = val
+ return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+ *s = stringValue(val)
+ return nil
+}
+func (s *stringValue) Type() string {
+ return "string"
+}
+
+func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) }
+
+func stringConv(sval string) (interface{}, error) {
+ return sval, nil
+}
+
+// GetString returns the string value of a flag with the given name
+func (f *FlagSet) GetString(name string) (string, error) {
+ val, err := f.getFlagType(name, "string", stringConv)
+ if err != nil {
+ return "", err
+ }
+ return val.(string), nil
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringVarP(p *string, name, shorthand string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (f *FlagSet) String(name string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, "", value, usage)
+ return p
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+ return CommandLine.StringP(name, "", value, usage)
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func StringP(name, shorthand string, value string, usage string) *string {
+ return CommandLine.StringP(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/string_slice.go b/src/kube2msb/vendor/github.com/spf13/pflag/string_slice.go
new file mode 100644
index 0000000..b53648b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/string_slice.go
@@ -0,0 +1,111 @@
+package pflag
+
+import (
+ "encoding/csv"
+ "fmt"
+ "strings"
+)
+
+var _ = fmt.Fprint
+
+// -- stringSlice Value
+type stringSliceValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
+ ssv := new(stringSliceValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func (s *stringSliceValue) Set(val string) error {
+ stringReader := strings.NewReader(val)
+ csvReader := csv.NewReader(stringReader)
+ v, err := csvReader.Read()
+ if err != nil {
+ return err
+ }
+ if !s.changed {
+ *s.value = v
+ } else {
+ *s.value = append(*s.value, v...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringSliceValue) Type() string {
+ return "stringSlice"
+}
+
+func (s *stringSliceValue) String() string { return "[" + strings.Join(*s.value, ",") + "]" }
+
+func stringSliceConv(sval string) (interface{}, error) {
+ sval = strings.Trim(sval, "[]")
+ // An empty string would cause a slice with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ v := strings.Split(sval, ",")
+ return v, nil
+}
+
+// GetStringSlice returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringSliceVar defines a []string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSliceVar defines a []string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+func StringSliceVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSlice defines a []string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringSlice defines a []string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+func StringSlice(name string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, "", value, usage)
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, shorthand, value, usage)
+}
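The Set method above splits each occurrence as a CSV record and, after the first occurrence replaces the default, appends on repeated use. A minimal sketch with an illustrative flag name and default:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	langs := fs.StringSlice("lang", []string{"go"}, "languages to enable")
	// First --lang replaces the default; the second appends; "c,rust" is split as CSV.
	_ = fs.Parse([]string{"--lang", "c,rust", "--lang", "ocaml"})
	fmt.Println(*langs) // [c rust ocaml]
}
```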
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/uint.go b/src/kube2msb/vendor/github.com/spf13/pflag/uint.go
new file mode 100644
index 0000000..e142b49
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/uint.go
@@ -0,0 +1,91 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+ *p = val
+ return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uintValue(v)
+ return err
+}
+
+func (i *uintValue) Type() string {
+ return "uint"
+}
+
+func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) }
+
+func uintConv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(v), nil
+}
+
+// GetUint returns the uint value of a flag with the given name
+func (f *FlagSet) GetUint(name string) (uint, error) {
+ val, err := f.getFlagType(name, "uint", uintConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint), nil
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, "", value, usage)
+ return p
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, "", value, usage)
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func UintP(name, shorthand string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, shorthand, value, usage)
+}
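Because Set and uintConv above call strconv.ParseUint with base 0, flag values may be written in decimal, hex (0x...), or octal (0...). A short sketch with a hypothetical flag name:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	size := fs.Uint("size", 0, "buffer size in bytes")
	_ = fs.Parse([]string{"--size", "0x40"}) // base-0 parsing accepts the 0x prefix
	fmt.Println(*size)                       // 64
}
```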
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/uint16.go b/src/kube2msb/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 0000000..5c96c19
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,89 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+ *p = val
+ return (*uint16Value)(p)
+}
+func (i *uint16Value) String() string { return fmt.Sprintf("%d", *i) }
+func (i *uint16Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 16)
+ *i = uint16Value(v)
+ return err
+}
+
+func (i *uint16Value) Type() string {
+ return "uint16"
+}
+
+func uint16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return uint16(v), nil
+}
+
+// GetUint16 returns the uint16 value of a flag with the given name
+func (f *FlagSet) GetUint16(name string) (uint16, error) {
+ val, err := f.getFlagType(name, "uint16", uint16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint16), nil
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(name string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/uint32.go b/src/kube2msb/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 0000000..294fcaa
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,89 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+ *p = val
+ return (*uint32Value)(p)
+}
+func (i *uint32Value) String() string { return fmt.Sprintf("%d", *i) }
+func (i *uint32Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 32)
+ *i = uint32Value(v)
+ return err
+}
+
+func (i *uint32Value) Type() string {
+ return "uint32"
+}
+
+func uint32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(v), nil
+}
+
+// GetUint32 returns the uint32 value of a flag with the given name
+func (f *FlagSet) GetUint32(name string) (uint32, error) {
+ val, err := f.getFlagType(name, "uint32", uint32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint32), nil
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/uint64.go b/src/kube2msb/vendor/github.com/spf13/pflag/uint64.go
new file mode 100644
index 0000000..c681885
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/uint64.go
@@ -0,0 +1,91 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+ *p = val
+ return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uint64Value(v)
+ return err
+}
+
+func (i *uint64Value) Type() string {
+ return "uint64"
+}
+
+func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) }
+
+func uint64Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 64)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(v), nil
+}
+
+// GetUint64 returns the uint64 value of a flag with the given name
+func (f *FlagSet) GetUint64(name string) (uint64, error) {
+ val, err := f.getFlagType(name, "uint64", uint64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint64), nil
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, "", value, usage)
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/uint8.go b/src/kube2msb/vendor/github.com/spf13/pflag/uint8.go
new file mode 100644
index 0000000..26db418
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/uint8.go
@@ -0,0 +1,91 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- uint8 Value
+type uint8Value uint8
+
+func newUint8Value(val uint8, p *uint8) *uint8Value {
+ *p = val
+ return (*uint8Value)(p)
+}
+
+func (i *uint8Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 8)
+ *i = uint8Value(v)
+ return err
+}
+
+func (i *uint8Value) Type() string {
+ return "uint8"
+}
+
+func (i *uint8Value) String() string { return fmt.Sprintf("%v", *i) }
+
+func uint8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return uint8(v), nil
+}
+
+// GetUint8 returns the uint8 value of a flag with the given name
+func (f *FlagSet) GetUint8(name string) (uint8, error) {
+ val, err := f.getFlagType(name, "uint8", uint8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint8), nil
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func Uint8Var(p *uint8, name string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func Uint8(name string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, "", value, usage)
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/LICENSE b/src/kube2msb/vendor/github.com/ugorji/go/LICENSE
new file mode 100644
index 0000000..95a0f05
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2012-2015 Ugorji Nwoke.
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/0doc.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/0doc.go
new file mode 100644
index 0000000..bd7361c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/0doc.go
@@ -0,0 +1,199 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+/*
+High Performance, Feature-Rich Idiomatic Go codec/encoding library for
+binc, msgpack, cbor, json.
+
+Supported Serialization formats are:
+
+ - msgpack: https://github.com/msgpack/msgpack
+ - binc: http://github.com/ugorji/binc
+ - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
+ - json: http://json.org http://tools.ietf.org/html/rfc7159
+ - simple:
+
+To install:
+
+ go get github.com/ugorji/go/codec
+
+This package understands the 'unsafe' tag, to allow using unsafe semantics:
+
+ - When decoding into a struct, you need to read the field name as a string
+ so you can find the struct field it is mapped to.
+ Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion.
+
+To install using unsafe, pass the 'unsafe' tag:
+
+ go get -tags=unsafe github.com/ugorji/go/codec
+
+For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer .
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+ - Simple but extremely powerful and feature-rich API
+ - Very High Performance.
+ Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
+ - Multiple conversions:
+ Package coerces types where appropriate
+ e.g. decode an int in the stream into a float, etc.
+ - Corner Cases:
+ Overflows, nil maps/slices, nil values in streams are handled correctly
+ - Standard field renaming via tags
+ - Support for omitting empty fields during an encoding
+ - Encoding from any value and decoding into pointer to any value
+ (struct, slice, map, primitives, pointers, interface{}, etc)
+ - Extensions to support efficient encoding/decoding of any named types
+ - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+ - Decoding without a schema (into an interface{}).
+ Includes Options to configure what specific map or slice type to use
+ when decoding an encoded list or map into a nil interface{}
+ - Encode a struct as an array, and decode struct from an array in the data stream
+ - Comprehensive support for anonymous fields
+ - Fast (no-reflection) encoding/decoding of common maps and slices
+ - Code-generation for faster performance.
+ - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+ - Support indefinite-length formats to enable true streaming
+ (for formats which support it e.g. json, cbor)
+ - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
+ This mostly applies to maps, where iteration order is non-deterministic.
+ - NIL in data stream decoded as zero value
+ - Never silently skip data when decoding.
+ User decides whether to return an error or silently skip data when keys or indexes
+ in the data stream do not map to fields in the struct.
+ - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
+ - Encode/Decode from/to chan types (for iterative streaming support)
+ - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+ - Provides a RPC Server and Client Codec for net/rpc communication protocol.
+ - Handle unique idiosyncrasies of codecs e.g.
+ - For messagepack, configure how ambiguities in handling raw bytes are resolved
+ - For messagepack, provide rpc server/client codec to support
+ msgpack-rpc protocol defined at:
+ https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+ type BisSet []int
+ type BitSet64 uint64
+ type UUID string
+ type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+ type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
+
+RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+Usage
+
+The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.
+
+The Encoder and Decoder are NOT safe for concurrent use.
+
+Consequently, the usage model is basically:
+
+ - Create and initialize the Handle before any use.
+ Once created, DO NOT modify it.
+ - Multiple Encoders or Decoders can now use the Handle concurrently.
+ They only read information off the Handle (never write).
+ - However, each Encoder or Decoder MUST not be used concurrently
+ - To re-use an Encoder/Decoder, call Reset(...) on it first.
+ This allows you to reuse state maintained on the Encoder/Decoder.
+
+Sample usage model:
+
+ // create and configure Handle
+ var (
+ bh codec.BincHandle
+ mh codec.MsgpackHandle
+ ch codec.CborHandle
+ )
+
+ mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+ // configure extensions
+ // e.g. for msgpack, define functions and enable Time support for tag 1
+ // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
+
+ // create and use decoder/encoder
+ var (
+ r io.Reader
+ w io.Writer
+ b []byte
+ h = &bh // or mh to use msgpack
+ )
+
+ dec = codec.NewDecoder(r, h)
+ dec = codec.NewDecoderBytes(b, h)
+ err = dec.Decode(&v)
+
+ enc = codec.NewEncoder(w, h)
+ enc = codec.NewEncoderBytes(&b, h)
+ err = enc.Encode(v)
+
+ //RPC Server
+ go func() {
+ for {
+ conn, err := listener.Accept()
+ rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+ rpc.ServeCodec(rpcCodec)
+ }
+ }()
+
+ //RPC Communication (client side)
+ conn, err = net.Dial("tcp", "localhost:5555")
+ rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+ client := rpc.NewClientWithCodec(rpcCodec)
+
+*/
+package codec
+
+// Benefits of go-codec:
+//
+// - encoding/json always reads whole file into memory first.
+// This makes it unsuitable for parsing very large files.
+// - encoding/xml cannot parse into a map[string]interface{}
+// I found this out on reading https://github.com/clbanning/mxj
+
+// TODO:
+//
+// - optimization for codecgen:
+// if len of entity is <= 3 words, then support a value receiver for encode.
+// - (En|De)coder should store an error when it occurs.
+// Until reset, subsequent calls return that error that was stored.
+// This means that free panics must go away.
+// All errors must be raised through errorf method.
+// - Decoding using a chan is good, but incurs concurrency costs.
+// This is because there's no fast way to use a channel without it
+// having to switch goroutines constantly.
+// Callback pattern is still the best. Maybe consider supporting something like:
+// type X struct {
+// Name string
+// Ys []Y
+// Ys chan <- Y
+// Ys func(Y) -> call this function for each entry
+// }
+// - Consider adding a isZeroer interface { isZero() bool }
+// It is used within isEmpty, for omitEmpty support.
+// - Consider making Handle used AS-IS within the encoding/decoding session.
+// This means that we don't cache Handle information within the (En|De)coder,
+// except we really need it at Reset(...)
+// - Consider adding math/big support
+// - Consider reducing the size of the generated functions:
+// Maybe use one loop, and put the conditionals in the loop.
+// for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } }
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/README.md b/src/kube2msb/vendor/github.com/ugorji/go/codec/README.md
new file mode 100644
index 0000000..a790a52
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/README.md
@@ -0,0 +1,148 @@
+# Codec
+
+High Performance, Feature-Rich Idiomatic Go codec/encoding library for
+binc, msgpack, cbor, json.
+
+Supported Serialization formats are:
+
+ - msgpack: https://github.com/msgpack/msgpack
+ - binc: http://github.com/ugorji/binc
+ - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
+ - json: http://json.org http://tools.ietf.org/html/rfc7159
+ - simple:
+
+To install:
+
+ go get github.com/ugorji/go/codec
+
+This package understands the `unsafe` tag, to allow using unsafe semantics:
+
+ - When decoding into a struct, you need to read the field name as a string
+ so you can find the struct field it is mapped to.
+ Using `unsafe` will bypass the allocation and copying overhead of `[]byte->string` conversion.
+
+To use it, you must pass the `unsafe` tag during install:
+
+```
+go install -tags=unsafe github.com/ugorji/go/codec
+```
+
+Online documentation: http://godoc.org/github.com/ugorji/go/codec
+Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+ - Simple but extremely powerful and feature-rich API
+ - Very High Performance.
+ Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
+ - Multiple conversions:
+ Package coerces types where appropriate
+ e.g. decode an int in the stream into a float, etc.
+ - Corner Cases:
+ Overflows, nil maps/slices, nil values in streams are handled correctly
+ - Standard field renaming via tags
+ - Support for omitting empty fields during an encoding
+ - Encoding from any value and decoding into pointer to any value
+ (struct, slice, map, primitives, pointers, interface{}, etc)
+ - Extensions to support efficient encoding/decoding of any named types
+ - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+ - Decoding without a schema (into an interface{}).
+ Includes Options to configure what specific map or slice type to use
+ when decoding an encoded list or map into a nil interface{}
+ - Encode a struct as an array, and decode struct from an array in the data stream
+ - Comprehensive support for anonymous fields
+ - Fast (no-reflection) encoding/decoding of common maps and slices
+ - Code-generation for faster performance.
+ - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+ - Support indefinite-length formats to enable true streaming
+ (for formats which support it e.g. json, cbor)
+ - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
+ This mostly applies to maps, where iteration order is non-deterministic.
+ - NIL in data stream decoded as zero value
+ - Never silently skip data when decoding.
+ User decides whether to return an error or silently skip data when keys or indexes
+ in the data stream do not map to fields in the struct.
+ - Encode/Decode from/to chan types (for iterative streaming support)
+ - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+ - Provides a RPC Server and Client Codec for net/rpc communication protocol.
+ - Handle unique idiosyncrasies of codecs e.g.
+ - For messagepack, configure how ambiguities in handling raw bytes are resolved
+ - For messagepack, provide rpc server/client codec to support
+ msgpack-rpc protocol defined at:
+ https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+## Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+ type BisSet []int
+ type BitSet64 uint64
+ type UUID string
+ type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+ type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
+
+## RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+## Usage
+
+Typical usage model:
+
+ // create and configure Handle
+ var (
+ bh codec.BincHandle
+ mh codec.MsgpackHandle
+ ch codec.CborHandle
+ )
+
+ mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+ // configure extensions
+ // e.g. for msgpack, define functions and enable Time support for tag 1
+ // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
+
+ // create and use decoder/encoder
+ var (
+ r io.Reader
+ w io.Writer
+ b []byte
+ h = &bh // or mh to use msgpack
+ )
+
+ dec = codec.NewDecoder(r, h)
+ dec = codec.NewDecoderBytes(b, h)
+ err = dec.Decode(&v)
+
+ enc = codec.NewEncoder(w, h)
+ enc = codec.NewEncoderBytes(&b, h)
+ err = enc.Encode(v)
+
+ //RPC Server
+ go func() {
+ for {
+ conn, err := listener.Accept()
+ rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+ rpc.ServeCodec(rpcCodec)
+ }
+ }()
+
+ //RPC Communication (client side)
+ conn, err = net.Dial("tcp", "localhost:5555")
+ rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+ client := rpc.NewClientWithCodec(rpcCodec)
+
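To make the README's Extension Support section concrete, here is a hedged sketch of registering a bytes extension for a custom type. The UUID type, tag number, and error handling are hypothetical, and it assumes the SetBytesExt registration call available on the binary handles of this codec vintage.

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/ugorji/go/codec"
)

// UUID is a hypothetical custom type encoded as its 16 raw bytes.
type UUID [16]byte

type uuidExt struct{}

// WriteExt returns the bytes written to the stream for the value.
func (uuidExt) WriteExt(v interface{}) []byte {
	switch u := v.(type) {
	case UUID:
		return u[:]
	case *UUID:
		return u[:]
	}
	return nil
}

// ReadExt fills the destination from the bytes read off the stream.
func (uuidExt) ReadExt(dst interface{}, src []byte) {
	u := dst.(*UUID)
	copy(u[:], src)
}

func main() {
	var mh codec.MsgpackHandle
	if err := mh.SetBytesExt(reflect.TypeOf(UUID{}), 1, uuidExt{}); err != nil {
		fmt.Println("register:", err)
		return
	}

	in := UUID{0xde, 0xad, 0xbe, 0xef}
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &mh).Encode(in); err != nil {
		fmt.Println("encode:", err)
		return
	}
	var out UUID
	if err := codec.NewDecoderBytes(buf, &mh).Decode(&out); err != nil {
		fmt.Println("decode:", err)
		return
	}
	fmt.Println(in == out) // true
}
```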
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/binc.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/binc.go
new file mode 100644
index 0000000..766d26c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/binc.go
@@ -0,0 +1,922 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+ "time"
+)
+
+const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning.
+
+// vd as low 4 bits (there are 16 slots)
+const (
+ bincVdSpecial byte = iota
+ bincVdPosInt
+ bincVdNegInt
+ bincVdFloat
+
+ bincVdString
+ bincVdByteArray
+ bincVdArray
+ bincVdMap
+
+ bincVdTimestamp
+ bincVdSmallInt
+ bincVdUnicodeOther
+ bincVdSymbol
+
+ bincVdDecimal
+ _ // open slot
+ _ // open slot
+ bincVdCustomExt = 0x0f
+)
+
+const (
+ bincSpNil byte = iota
+ bincSpFalse
+ bincSpTrue
+ bincSpNan
+ bincSpPosInf
+ bincSpNegInf
+ bincSpZeroFloat
+ bincSpZero
+ bincSpNegOne
+)
+
+const (
+ bincFlBin16 byte = iota
+ bincFlBin32
+ _ // bincFlBin32e
+ bincFlBin64
+ _ // bincFlBin64e
+ // others not currently supported
+)
+
+type bincEncDriver struct {
+ e *Encoder
+ w encWriter
+ m map[string]uint16 // symbols
+ b [scratchByteArrayLen]byte
+ s uint16 // symbols sequencer
+ encNoSeparator
+}
+
+func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool {
+ return rt == timeTypId
+}
+
+func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {
+ if rt == timeTypId {
+ var bs []byte
+ switch x := v.(type) {
+ case time.Time:
+ bs = encodeTime(x)
+ case *time.Time:
+ bs = encodeTime(*x)
+ default:
+ e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v)
+ }
+ e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
+ e.w.writeb(bs)
+ }
+}
+
+func (e *bincEncDriver) EncodeNil() {
+ e.w.writen1(bincVdSpecial<<4 | bincSpNil)
+}
+
+func (e *bincEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(bincVdSpecial<<4 | bincSpTrue)
+ } else {
+ e.w.writen1(bincVdSpecial<<4 | bincSpFalse)
+ }
+}
+
+func (e *bincEncDriver) EncodeFloat32(f float32) {
+ if f == 0 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
+ return
+ }
+ e.w.writen1(bincVdFloat<<4 | bincFlBin32)
+ bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *bincEncDriver) EncodeFloat64(f float64) {
+ if f == 0 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
+ return
+ }
+ bigen.PutUint64(e.b[:8], math.Float64bits(f))
+ if bincDoPrune {
+ i := 7
+ for ; i >= 0 && (e.b[i] == 0); i-- {
+ }
+ i++
+ if i <= 6 {
+ e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64)
+ e.w.writen1(byte(i))
+ e.w.writeb(e.b[:i])
+ return
+ }
+ }
+ e.w.writen1(bincVdFloat<<4 | bincFlBin64)
+ e.w.writeb(e.b[:8])
+}
+
+func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) {
+ if lim == 4 {
+ bigen.PutUint32(e.b[:lim], uint32(v))
+ } else {
+ bigen.PutUint64(e.b[:lim], v)
+ }
+ if bincDoPrune {
+ i := pruneSignExt(e.b[:lim], pos)
+ e.w.writen1(bd | lim - 1 - byte(i))
+ e.w.writeb(e.b[i:lim])
+ } else {
+ e.w.writen1(bd | lim - 1)
+ e.w.writeb(e.b[:lim])
+ }
+}
+
+func (e *bincEncDriver) EncodeInt(v int64) {
+ const nbd byte = bincVdNegInt << 4
+ if v >= 0 {
+ e.encUint(bincVdPosInt<<4, true, uint64(v))
+ } else if v == -1 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpNegOne)
+ } else {
+ e.encUint(bincVdNegInt<<4, false, uint64(-v))
+ }
+}
+
+func (e *bincEncDriver) EncodeUint(v uint64) {
+ e.encUint(bincVdPosInt<<4, true, v)
+}
+
+func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) {
+ if v == 0 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpZero)
+ } else if pos && v >= 1 && v <= 16 {
+ e.w.writen1(bincVdSmallInt<<4 | byte(v-1))
+ } else if v <= math.MaxUint8 {
+ e.w.writen2(bd|0x0, byte(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd | 0x01)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.encIntegerPrune(bd, pos, v, 4)
+ } else {
+ e.encIntegerPrune(bd, pos, v, 8)
+ }
+}
+
+func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
+ bs := ext.WriteExt(rv)
+ if bs == nil {
+ e.EncodeNil()
+ return
+ }
+ e.encodeExtPreamble(uint8(xtag), len(bs))
+ e.w.writeb(bs)
+}
+
+func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+ e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+ e.w.writeb(re.Data)
+}
+
+func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
+ e.encLen(bincVdCustomExt<<4, uint64(length))
+ e.w.writen1(xtag)
+}
+
+func (e *bincEncDriver) EncodeArrayStart(length int) {
+ e.encLen(bincVdArray<<4, uint64(length))
+}
+
+func (e *bincEncDriver) EncodeMapStart(length int) {
+ e.encLen(bincVdMap<<4, uint64(length))
+}
+
+func (e *bincEncDriver) EncodeString(c charEncoding, v string) {
+ l := uint64(len(v))
+ e.encBytesLen(c, l)
+ if l > 0 {
+ e.w.writestr(v)
+ }
+}
+
+func (e *bincEncDriver) EncodeSymbol(v string) {
+ // if WriteSymbolsNoRefs {
+ // e.encodeString(c_UTF8, v)
+ // return
+ // }
+
+ //symbols only offer benefit when string length > 1.
+ //This is because strings with length 1 take only 2 bytes to store
+ //(bd with embedded length, and single byte for string val).
+
+ l := len(v)
+ if l == 0 {
+ e.encBytesLen(c_UTF8, 0)
+ return
+ } else if l == 1 {
+ e.encBytesLen(c_UTF8, 1)
+ e.w.writen1(v[0])
+ return
+ }
+ if e.m == nil {
+ e.m = make(map[string]uint16, 16)
+ }
+ ui, ok := e.m[v]
+ if ok {
+ if ui <= math.MaxUint8 {
+ e.w.writen2(bincVdSymbol<<4, byte(ui))
+ } else {
+ e.w.writen1(bincVdSymbol<<4 | 0x8)
+ bigenHelper{e.b[:2], e.w}.writeUint16(ui)
+ }
+ } else {
+ e.s++
+ ui = e.s
+ //ui = uint16(atomic.AddUint32(&e.s, 1))
+ e.m[v] = ui
+ var lenprec uint8
+ if l <= math.MaxUint8 {
+ // lenprec = 0
+ } else if l <= math.MaxUint16 {
+ lenprec = 1
+ } else if int64(l) <= math.MaxUint32 {
+ lenprec = 2
+ } else {
+ lenprec = 3
+ }
+ if ui <= math.MaxUint8 {
+ e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui))
+ } else {
+ e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec)
+ bigenHelper{e.b[:2], e.w}.writeUint16(ui)
+ }
+ if lenprec == 0 {
+ e.w.writen1(byte(l))
+ } else if lenprec == 1 {
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l))
+ } else if lenprec == 2 {
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l))
+ } else {
+ bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l))
+ }
+ e.w.writestr(v)
+ }
+}
+
+func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ l := uint64(len(v))
+ e.encBytesLen(c, l)
+ if l > 0 {
+ e.w.writeb(v)
+ }
+}
+
+func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
+ //TODO: support bincUnicodeOther (for now, just use string or bytearray)
+ if c == c_RAW {
+ e.encLen(bincVdByteArray<<4, length)
+ } else {
+ e.encLen(bincVdString<<4, length)
+ }
+}
+
+func (e *bincEncDriver) encLen(bd byte, l uint64) {
+ if l < 12 {
+ e.w.writen1(bd | uint8(l+4))
+ } else {
+ e.encLenNumber(bd, l)
+ }
+}
+
+func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
+ if v <= math.MaxUint8 {
+ e.w.writen2(bd, byte(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd | 0x01)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.w.writen1(bd | 0x02)
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
+ } else {
+ e.w.writen1(bd | 0x03)
+ bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v))
+ }
+}
+
+//------------------------------------
+
+type bincDecSymbol struct {
+ s string
+ b []byte
+ i uint16
+}
+
+type bincDecDriver struct {
+ d *Decoder
+ h *BincHandle
+ r decReader
+ br bool // bytes reader
+ bdRead bool
+ bd byte
+ vd byte
+ vs byte
+ noStreamingCodec
+ decNoSeparator
+ b [scratchByteArrayLen]byte
+
+ // linear searching on this slice is ok,
+ // because we typically expect < 32 symbols in each stream.
+ s []bincDecSymbol
+}
+
+func (d *bincDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.vd = d.bd >> 4
+ d.vs = d.bd & 0x0f
+ d.bdRead = true
+}
+
+func (d *bincDecDriver) ContainerType() (vt valueType) {
+ if d.vd == bincVdSpecial && d.vs == bincSpNil {
+ return valueTypeNil
+ } else if d.vd == bincVdByteArray {
+ return valueTypeBytes
+ } else if d.vd == bincVdString {
+ return valueTypeString
+ } else if d.vd == bincVdArray {
+ return valueTypeArray
+ } else if d.vd == bincVdMap {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ }
+ return valueTypeUnset
+}
+
+func (d *bincDecDriver) TryDecodeAsNil() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincVdSpecial<<4|bincSpNil {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool {
+ return rt == timeTypId
+}
+
+func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if rt == timeTypId {
+ if d.vd != bincVdTimestamp {
+ d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd)
+ return
+ }
+ tt, err := decodeTime(d.r.readx(int(d.vs)))
+ if err != nil {
+ panic(err)
+ }
+ var vt *time.Time = v.(*time.Time)
+ *vt = tt
+ d.bdRead = false
+ }
+}
+
+func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
+ if vs&0x8 == 0 {
+ d.r.readb(d.b[0:defaultLen])
+ } else {
+ l := d.r.readn1()
+ if l > 8 {
+ d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l)
+ return
+ }
+ for i := l; i < 8; i++ {
+ d.b[i] = 0
+ }
+ d.r.readb(d.b[0:l])
+ }
+}
+
+func (d *bincDecDriver) decFloat() (f float64) {
+ //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; }
+ if x := d.vs & 0x7; x == bincFlBin32 {
+ d.decFloatPre(d.vs, 4)
+ f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4])))
+ } else if x == bincFlBin64 {
+ d.decFloatPre(d.vs, 8)
+ f = math.Float64frombits(bigen.Uint64(d.b[0:8]))
+ } else {
+ d.d.errorf("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs)
+ return
+ }
+ return
+}
+
+func (d *bincDecDriver) decUint() (v uint64) {
+ // need to inline the code (interface conversion and type assertion expensive)
+ switch d.vs {
+ case 0:
+ v = uint64(d.r.readn1())
+ case 1:
+ d.r.readb(d.b[6:8])
+ v = uint64(bigen.Uint16(d.b[6:8]))
+ case 2:
+ d.b[4] = 0
+ d.r.readb(d.b[5:8])
+ v = uint64(bigen.Uint32(d.b[4:8]))
+ case 3:
+ d.r.readb(d.b[4:8])
+ v = uint64(bigen.Uint32(d.b[4:8]))
+ case 4, 5, 6:
+ lim := int(7 - d.vs)
+ d.r.readb(d.b[lim:8])
+ for i := 0; i < lim; i++ {
+ d.b[i] = 0
+ }
+ v = uint64(bigen.Uint64(d.b[:8]))
+ case 7:
+ d.r.readb(d.b[:8])
+ v = uint64(bigen.Uint64(d.b[:8]))
+ default:
+ d.d.errorf("unsigned integers with greater than 64 bits of precision not supported")
+ return
+ }
+ return
+}
+
+func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ vd, vs := d.vd, d.vs
+ if vd == bincVdPosInt {
+ ui = d.decUint()
+ } else if vd == bincVdNegInt {
+ ui = d.decUint()
+ neg = true
+ } else if vd == bincVdSmallInt {
+ ui = uint64(d.vs) + 1
+ } else if vd == bincVdSpecial {
+ if vs == bincSpZero {
+ //i = 0
+ } else if vs == bincSpNegOne {
+ neg = true
+ ui = 1
+ } else {
+ d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs)
+ return
+ }
+ } else {
+ d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
+ return
+ }
+ return
+}
+
+func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ ui, neg := d.decCheckInteger()
+ i, overflow := chkOvf.SignedInt(ui)
+ if overflow {
+ d.d.errorf("simple: overflow converting %v to signed integer", ui)
+ return
+ }
+ if neg {
+ i = -i
+ }
+ if chkOvf.Int(i, bitsize) {
+ d.d.errorf("binc: overflow integer: %v", i)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ ui, neg := d.decCheckInteger()
+ if neg {
+ d.d.errorf("Assigning negative signed value to unsigned type")
+ return
+ }
+ if chkOvf.Uint(ui, bitsize) {
+ d.d.errorf("binc: overflow integer: %v", ui)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ vd, vs := d.vd, d.vs
+ if vd == bincVdSpecial {
+ d.bdRead = false
+ if vs == bincSpNan {
+ return math.NaN()
+ } else if vs == bincSpPosInf {
+ return math.Inf(1)
+ } else if vs == bincSpZeroFloat || vs == bincSpZero {
+ return
+ } else if vs == bincSpNegInf {
+ return math.Inf(-1)
+ } else {
+ d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs)
+ return
+ }
+ } else if vd == bincVdFloat {
+ f = d.decFloat()
+ } else {
+ f = float64(d.DecodeInt(64))
+ }
+ if chkOverflow32 && chkOvf.Float32(f) {
+ d.d.errorf("binc: float32 overflow: %v", f)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *bincDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) {
+ // b = false
+ } else if bd == (bincVdSpecial | bincSpTrue) {
+ b = true
+ } else {
+ d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) ReadMapStart() (length int) {
+ if d.vd != bincVdMap {
+ d.d.errorf("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd)
+ return
+ }
+ length = d.decLen()
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) ReadArrayStart() (length int) {
+ if d.vd != bincVdArray {
+ d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd)
+ return
+ }
+ length = d.decLen()
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) decLen() int {
+ if d.vs > 3 {
+ return int(d.vs - 4)
+ }
+ return int(d.decLenNumber())
+}
+
+func (d *bincDecDriver) decLenNumber() (v uint64) {
+ if x := d.vs; x == 0 {
+ v = uint64(d.r.readn1())
+ } else if x == 1 {
+ d.r.readb(d.b[6:8])
+ v = uint64(bigen.Uint16(d.b[6:8]))
+ } else if x == 2 {
+ d.r.readb(d.b[4:8])
+ v = uint64(bigen.Uint32(d.b[4:8]))
+ } else {
+ d.r.readb(d.b[:8])
+ v = bigen.Uint64(d.b[:8])
+ }
+ return
+}
+
+func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincVdSpecial<<4|bincSpNil {
+ d.bdRead = false
+ return
+ }
+ var slen int = -1
+ // var ok bool
+ switch d.vd {
+ case bincVdString, bincVdByteArray:
+ slen = d.decLen()
+ if zerocopy {
+ if d.br {
+ bs2 = d.r.readx(slen)
+ } else if len(bs) == 0 {
+ bs2 = decByteSlice(d.r, slen, d.b[:])
+ } else {
+ bs2 = decByteSlice(d.r, slen, bs)
+ }
+ } else {
+ bs2 = decByteSlice(d.r, slen, bs)
+ }
+ if withString {
+ s = string(bs2)
+ }
+ case bincVdSymbol:
+ // zerocopy doesn't apply for symbols,
+ // as the values must be stored in a table for later use.
+ //
+ //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision,
+ //extract symbol
+ //if containsStringVal, read it and put in map
+ //else look in map for string value
+ var symbol uint16
+ vs := d.vs
+ if vs&0x8 == 0 {
+ symbol = uint16(d.r.readn1())
+ } else {
+ symbol = uint16(bigen.Uint16(d.r.readx(2)))
+ }
+ if d.s == nil {
+ d.s = make([]bincDecSymbol, 0, 16)
+ }
+
+ if vs&0x4 == 0 {
+ for i := range d.s {
+ j := &d.s[i]
+ if j.i == symbol {
+ bs2 = j.b
+ if withString {
+ if j.s == "" && bs2 != nil {
+ j.s = string(bs2)
+ }
+ s = j.s
+ }
+ break
+ }
+ }
+ } else {
+ switch vs & 0x3 {
+ case 0:
+ slen = int(d.r.readn1())
+ case 1:
+ slen = int(bigen.Uint16(d.r.readx(2)))
+ case 2:
+ slen = int(bigen.Uint32(d.r.readx(4)))
+ case 3:
+ slen = int(bigen.Uint64(d.r.readx(8)))
+ }
+ // since using symbols, do not store any part of
+ // the parameter bs in the map, as it might be a shared buffer.
+ // bs2 = decByteSlice(d.r, slen, bs)
+ bs2 = decByteSlice(d.r, slen, nil)
+ if withString {
+ s = string(bs2)
+ }
+ d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2})
+ }
+ default:
+ d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x",
+ bincVdString, bincVdByteArray, bincVdSymbol, d.vd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeString() (s string) {
+	// DecodeBytes does not accommodate symbols, whose impl stores the string version in a map.
+ // Use decStringAndBytes directly.
+ // return string(d.DecodeBytes(d.b[:], true, true))
+ _, s = d.decStringAndBytes(d.b[:], true, true)
+ return
+}
+
+func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ if isstring {
+ bsOut, _ = d.decStringAndBytes(bs, false, zerocopy)
+ return
+ }
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincVdSpecial<<4|bincSpNil {
+ d.bdRead = false
+ return nil
+ }
+ var clen int
+ if d.vd == bincVdString || d.vd == bincVdByteArray {
+ clen = d.decLen()
+ } else {
+ d.d.errorf("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x",
+ bincVdString, bincVdByteArray, d.vd)
+ return
+ }
+ d.bdRead = false
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, bs)
+}
+
+func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if xtag > 0xff {
+ d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
+ return
+ }
+ realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+ realxtag = uint64(realxtag1)
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+ } else {
+ ext.ReadExt(rv, xbs)
+ }
+ return
+}
+
+func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.vd == bincVdCustomExt {
+ l := d.decLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+ return
+ }
+ xbs = d.r.readx(l)
+ } else if d.vd == bincVdByteArray {
+ xbs = d.DecodeBytes(nil, false, true)
+ } else {
+ d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+
+ n := &d.d.n
+ var decodeFurther bool
+
+ switch d.vd {
+ case bincVdSpecial:
+ switch d.vs {
+ case bincSpNil:
+ n.v = valueTypeNil
+ case bincSpFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case bincSpTrue:
+ n.v = valueTypeBool
+ n.b = true
+ case bincSpNan:
+ n.v = valueTypeFloat
+ n.f = math.NaN()
+ case bincSpPosInf:
+ n.v = valueTypeFloat
+ n.f = math.Inf(1)
+ case bincSpNegInf:
+ n.v = valueTypeFloat
+ n.f = math.Inf(-1)
+ case bincSpZeroFloat:
+ n.v = valueTypeFloat
+ n.f = float64(0)
+ case bincSpZero:
+ n.v = valueTypeUint
+ n.u = uint64(0) // int8(0)
+ case bincSpNegOne:
+ n.v = valueTypeInt
+ n.i = int64(-1) // int8(-1)
+ default:
+ d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs)
+ }
+ case bincVdSmallInt:
+ n.v = valueTypeUint
+ n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
+ case bincVdPosInt:
+ n.v = valueTypeUint
+ n.u = d.decUint()
+ case bincVdNegInt:
+ n.v = valueTypeInt
+ n.i = -(int64(d.decUint()))
+ case bincVdFloat:
+ n.v = valueTypeFloat
+ n.f = d.decFloat()
+ case bincVdSymbol:
+ n.v = valueTypeSymbol
+ n.s = d.DecodeString()
+ case bincVdString:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case bincVdByteArray:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case bincVdTimestamp:
+ n.v = valueTypeTimestamp
+ tt, err := decodeTime(d.r.readx(int(d.vs)))
+ if err != nil {
+ panic(err)
+ }
+ n.t = tt
+ case bincVdCustomExt:
+ n.v = valueTypeExt
+ l := d.decLen()
+ n.u = uint64(d.r.readn1())
+ n.l = d.r.readx(l)
+ case bincVdArray:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case bincVdMap:
+ n.v = valueTypeMap
+ decodeFurther = true
+ default:
+ d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd)
+ }
+
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ if n.v == valueTypeUint && d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = int64(n.u)
+ }
+ return
+}
+
+//------------------------------------
+
+//BincHandle is a Handle for the Binc Schema-Free Encoding Format
+//defined at https://github.com/ugorji/binc .
+//
+//BincHandle currently supports all Binc features with the following EXCEPTIONS:
+// - only integers up to 64 bits of precision are supported.
+// big integers are unsupported.
+// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types).
+// extended precision and decimal IEEE 754 floats are unsupported.
+// - Only UTF-8 strings supported.
+// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
+//
+//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon.
+type BincHandle struct {
+ BasicHandle
+ binaryEncodingType
+}
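+
+// A minimal usage sketch (editorial illustration, not part of the upstream sources),
+// assuming NewEncoderBytes from this package's encode side; NewDecoderBytes is defined
+// in decode.go:
+//
+//	var bh BincHandle
+//	var out []byte
+//	if err := NewEncoderBytes(&out, &bh).Encode(map[string]int{"a": 1}); err != nil {
+//		// handle the encode error
+//	}
+//	var m map[string]int
+//	if err := NewDecoderBytes(out, &bh).Decode(&m); err != nil {
+//		// handle the decode error
+//	}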
+
+func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{b: ext})
+}
+
+func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
+ return &bincEncDriver{e: e, w: e.w}
+}
+
+func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
+ return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes}
+}
+
+func (e *bincEncDriver) reset() {
+ e.w = e.e.w
+ e.s = 0
+ e.m = nil
+}
+
+func (d *bincDecDriver) reset() {
+ d.r = d.d.r
+ d.s = nil
+ d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0
+}
+
+var _ decDriver = (*bincDecDriver)(nil)
+var _ encDriver = (*bincEncDriver)(nil)
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/cbor.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/cbor.go
new file mode 100644
index 0000000..a224cd3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/cbor.go
@@ -0,0 +1,585 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+)
+
+const (
+ cborMajorUint byte = iota
+ cborMajorNegInt
+ cborMajorBytes
+ cborMajorText
+ cborMajorArray
+ cborMajorMap
+ cborMajorTag
+ cborMajorOther
+)
+
+const (
+ cborBdFalse byte = 0xf4 + iota
+ cborBdTrue
+ cborBdNil
+ cborBdUndefined
+ cborBdExt
+ cborBdFloat16
+ cborBdFloat32
+ cborBdFloat64
+)
+
+const (
+ cborBdIndefiniteBytes byte = 0x5f
+ cborBdIndefiniteString = 0x7f
+ cborBdIndefiniteArray = 0x9f
+ cborBdIndefiniteMap = 0xbf
+ cborBdBreak = 0xff
+)
+
+const (
+ CborStreamBytes byte = 0x5f
+ CborStreamString = 0x7f
+ CborStreamArray = 0x9f
+ CborStreamMap = 0xbf
+ CborStreamBreak = 0xff
+)
+
+const (
+ cborBaseUint byte = 0x00
+ cborBaseNegInt = 0x20
+ cborBaseBytes = 0x40
+ cborBaseString = 0x60
+ cborBaseArray = 0x80
+ cborBaseMap = 0xa0
+ cborBaseTag = 0xc0
+ cborBaseSimple = 0xe0
+)
+
+// -------------------
+
+type cborEncDriver struct {
+ noBuiltInTypes
+ encNoSeparator
+ e *Encoder
+ w encWriter
+ h *CborHandle
+ x [8]byte
+}
+
+func (e *cborEncDriver) EncodeNil() {
+ e.w.writen1(cborBdNil)
+}
+
+func (e *cborEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(cborBdTrue)
+ } else {
+ e.w.writen1(cborBdFalse)
+ }
+}
+
+func (e *cborEncDriver) EncodeFloat32(f float32) {
+ e.w.writen1(cborBdFloat32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *cborEncDriver) EncodeFloat64(f float64) {
+ e.w.writen1(cborBdFloat64)
+ bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
+func (e *cborEncDriver) encUint(v uint64, bd byte) {
+ if v <= 0x17 {
+ e.w.writen1(byte(v) + bd)
+ } else if v <= math.MaxUint8 {
+ e.w.writen2(bd+0x18, uint8(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd + 0x19)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.w.writen1(bd + 0x1a)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v))
+ } else { // if v <= math.MaxUint64 {
+ e.w.writen1(bd + 0x1b)
+ bigenHelper{e.x[:8], e.w}.writeUint64(v)
+ }
+}
+
+func (e *cborEncDriver) EncodeInt(v int64) {
+ if v < 0 {
+ e.encUint(uint64(-1-v), cborBaseNegInt)
+ } else {
+ e.encUint(uint64(v), cborBaseUint)
+ }
+}
+
+func (e *cborEncDriver) EncodeUint(v uint64) {
+ e.encUint(v, cborBaseUint)
+}
+
+func (e *cborEncDriver) encLen(bd byte, length int) {
+ e.encUint(uint64(length), bd)
+}
+
+func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
+ e.encUint(uint64(xtag), cborBaseTag)
+ if v := ext.ConvertExt(rv); v == nil {
+ e.EncodeNil()
+ } else {
+ en.encode(v)
+ }
+}
+
+func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
+ e.encUint(uint64(re.Tag), cborBaseTag)
+ if re.Data != nil {
+ en.encode(re.Data)
+ } else if re.Value == nil {
+ e.EncodeNil()
+ } else {
+ en.encode(re.Value)
+ }
+}
+
+func (e *cborEncDriver) EncodeArrayStart(length int) {
+ e.encLen(cborBaseArray, length)
+}
+
+func (e *cborEncDriver) EncodeMapStart(length int) {
+ e.encLen(cborBaseMap, length)
+}
+
+func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
+ e.encLen(cborBaseString, len(v))
+ e.w.writestr(v)
+}
+
+func (e *cborEncDriver) EncodeSymbol(v string) {
+ e.EncodeString(c_UTF8, v)
+}
+
+func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ if c == c_RAW {
+ e.encLen(cborBaseBytes, len(v))
+ } else {
+ e.encLen(cborBaseString, len(v))
+ }
+ e.w.writeb(v)
+}
+
+// ----------------------
+
+type cborDecDriver struct {
+ d *Decoder
+ h *CborHandle
+ r decReader
+ b [scratchByteArrayLen]byte
+ br bool // bytes reader
+ bdRead bool
+ bd byte
+ noBuiltInTypes
+ decNoSeparator
+}
+
+func (d *cborDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.bdRead = true
+}
+
+func (d *cborDecDriver) ContainerType() (vt valueType) {
+ if d.bd == cborBdNil {
+ return valueTypeNil
+ } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) {
+ return valueTypeBytes
+ } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) {
+ return valueTypeString
+ } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
+ return valueTypeArray
+ } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ }
+ return valueTypeUnset
+}
+
+func (d *cborDecDriver) TryDecodeAsNil() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ // treat Nil and Undefined as nil values
+ if d.bd == cborBdNil || d.bd == cborBdUndefined {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *cborDecDriver) CheckBreak() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == cborBdBreak {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *cborDecDriver) decUint() (ui uint64) {
+ v := d.bd & 0x1f
+ if v <= 0x17 {
+ ui = uint64(v)
+ } else {
+ if v == 0x18 {
+ ui = uint64(d.r.readn1())
+ } else if v == 0x19 {
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ } else if v == 0x1a {
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ } else if v == 0x1b {
+ ui = uint64(bigen.Uint64(d.r.readx(8)))
+ } else {
+ d.d.errorf("decUint: Invalid descriptor: %v", d.bd)
+ return
+ }
+ }
+ return
+}
+
+func (d *cborDecDriver) decCheckInteger() (neg bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ major := d.bd >> 5
+ if major == cborMajorUint {
+ } else if major == cborMajorNegInt {
+ neg = true
+ } else {
+ d.d.errorf("invalid major: %v (bd: %v)", major, d.bd)
+ return
+ }
+ return
+}
+
+func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ neg := d.decCheckInteger()
+ ui := d.decUint()
+ // check if this number can be converted to an int without overflow
+ var overflow bool
+ if neg {
+ if i, overflow = chkOvf.SignedInt(ui + 1); overflow {
+ d.d.errorf("cbor: overflow converting %v to signed integer", ui+1)
+ return
+ }
+ i = -i
+ } else {
+ if i, overflow = chkOvf.SignedInt(ui); overflow {
+ d.d.errorf("cbor: overflow converting %v to signed integer", ui)
+ return
+ }
+ }
+ if chkOvf.Int(i, bitsize) {
+ d.d.errorf("cbor: overflow integer: %v", i)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ if d.decCheckInteger() {
+ d.d.errorf("Assigning negative signed value to unsigned type")
+ return
+ }
+ ui = d.decUint()
+ if chkOvf.Uint(ui, bitsize) {
+ d.d.errorf("cbor: overflow integer: %v", ui)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if bd := d.bd; bd == cborBdFloat16 {
+ f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2)))))
+ } else if bd == cborBdFloat32 {
+ f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ } else if bd == cborBdFloat64 {
+ f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+ } else if bd >= cborBaseUint && bd < cborBaseBytes {
+ f = float64(d.DecodeInt(64))
+ } else {
+ d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd)
+ return
+ }
+ if chkOverflow32 && chkOvf.Float32(f) {
+ d.d.errorf("cbor: float32 overflow: %v", f)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *cborDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if bd := d.bd; bd == cborBdTrue {
+ b = true
+ } else if bd == cborBdFalse {
+ } else {
+ d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) ReadMapStart() (length int) {
+ d.bdRead = false
+ if d.bd == cborBdIndefiniteMap {
+ return -1
+ }
+ return d.decLen()
+}
+
+func (d *cborDecDriver) ReadArrayStart() (length int) {
+ d.bdRead = false
+ if d.bd == cborBdIndefiniteArray {
+ return -1
+ }
+ return d.decLen()
+}
+
+func (d *cborDecDriver) decLen() int {
+ return int(d.decUint())
+}
+
+func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
+ d.bdRead = false
+ for {
+ if d.CheckBreak() {
+ break
+ }
+ if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText {
+ d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd)
+ return nil
+ }
+ n := d.decLen()
+ oldLen := len(bs)
+ newLen := oldLen + n
+ if newLen > cap(bs) {
+ bs2 := make([]byte, newLen, 2*cap(bs)+n)
+ copy(bs2, bs)
+ bs = bs2
+ } else {
+ bs = bs[:newLen]
+ }
+ d.r.readb(bs[oldLen:newLen])
+ // bs = append(bs, d.r.readn()...)
+ d.bdRead = false
+ }
+ d.bdRead = false
+ return bs
+}
+
+func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == cborBdNil || d.bd == cborBdUndefined {
+ d.bdRead = false
+ return nil
+ }
+ if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
+ if bs == nil {
+ return d.decAppendIndefiniteBytes(nil)
+ }
+ return d.decAppendIndefiniteBytes(bs[:0])
+ }
+ clen := d.decLen()
+ d.bdRead = false
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, bs)
+}
+
+func (d *cborDecDriver) DecodeString() (s string) {
+ return string(d.DecodeBytes(d.b[:], true, true))
+}
+
+func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ u := d.decUint()
+ d.bdRead = false
+ realxtag = u
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ d.d.decode(&re.Value)
+ } else if xtag != realxtag {
+ d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag)
+ return
+ } else {
+ var v interface{}
+ d.d.decode(&v)
+ ext.UpdateExt(rv, v)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+
+ n := &d.d.n
+ var decodeFurther bool
+
+ switch d.bd {
+ case cborBdNil:
+ n.v = valueTypeNil
+ case cborBdFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case cborBdTrue:
+ n.v = valueTypeBool
+ n.b = true
+ case cborBdFloat16, cborBdFloat32:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat(true)
+ case cborBdFloat64:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat(false)
+ case cborBdIndefiniteBytes:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case cborBdIndefiniteString:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case cborBdIndefiniteArray:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case cborBdIndefiniteMap:
+ n.v = valueTypeMap
+ decodeFurther = true
+ default:
+ switch {
+ case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
+ if d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = d.DecodeInt(64)
+ } else {
+ n.v = valueTypeUint
+ n.u = d.DecodeUint(64)
+ }
+ case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
+ n.v = valueTypeInt
+ n.i = d.DecodeInt(64)
+ case d.bd >= cborBaseBytes && d.bd < cborBaseString:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case d.bd >= cborBaseString && d.bd < cborBaseArray:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case d.bd >= cborBaseArray && d.bd < cborBaseMap:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case d.bd >= cborBaseMap && d.bd < cborBaseTag:
+ n.v = valueTypeMap
+ decodeFurther = true
+ case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
+ n.v = valueTypeExt
+ n.u = d.decUint()
+ n.l = nil
+ // d.bdRead = false
+ // d.d.decode(&re.Value) // handled by decode itself.
+ // decodeFurther = true
+ default:
+ d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
+ return
+ }
+ }
+
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ return
+}
+
+// -------------------------
+
+// CborHandle is a Handle for the CBOR encoding format,
+// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
+//
+// CBOR is comprehensively supported, including support for:
+// - indefinite-length arrays/maps/bytes/strings
+// - (extension) tags in range 0..0xffff (0 .. 65535)
+// - half, single and double-precision floats
+// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
+// - nil, true, false, ...
+// - arrays and maps, bytes and text strings
+//
+// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
+// Users can implement them as needed (using SetExt), including spec-documented ones:
+// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
+//
+// To encode with indefinite lengths (streaming), users will use
+// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants.
+//
+// For example, to encode "one-byte" as an indefinite length string:
+// var buf bytes.Buffer
+// e := NewEncoder(&buf, new(CborHandle))
+// buf.WriteByte(CborStreamString)
+// e.MustEncode("one-")
+// e.MustEncode("byte")
+// buf.WriteByte(CborStreamBreak)
+// encodedBytes := buf.Bytes()
+// var vv interface{}
+// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv)
+// // Now, vv contains the same string "one-byte"
+//
+type CborHandle struct {
+ binaryEncodingType
+ BasicHandle
+}
+
+func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{i: ext})
+}
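+
+// A hedged sketch of registering a spec-documented tag via SetInterfaceExt
+// (editorial illustration; myTimeExt is a hypothetical type implementing InterfaceExt,
+// and tag 1 is the RFC 7049 epoch-based time tag):
+//
+//	var ch CborHandle
+//	if err := ch.SetInterfaceExt(reflect.TypeOf(time.Time{}), 1, myTimeExt{}); err != nil {
+//		// handle the registration error
+//	}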
+
+func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
+ return &cborEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
+ return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes}
+}
+
+func (e *cborEncDriver) reset() {
+ e.w = e.e.w
+}
+
+func (d *cborDecDriver) reset() {
+ d.r = d.d.r
+ d.bd, d.bdRead = 0, false
+}
+
+var _ decDriver = (*cborDecDriver)(nil)
+var _ encDriver = (*cborEncDriver)(nil)
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/decode.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/decode.go
new file mode 100644
index 0000000..7e56f1e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/decode.go
@@ -0,0 +1,2019 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "time"
+)
+
+// Some tagging information for error messages.
+const (
+ msgBadDesc = "Unrecognized descriptor byte"
+ msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v"
+)
+
+var (
+ onlyMapOrArrayCanDecodeIntoStructErr = errors.New("only encoded map or array can be decoded into a struct")
+ cannotDecodeIntoNilErr = errors.New("cannot decode into nil")
+)
+
+// decReader abstracts the reading source, allowing implementations that can
+// read from an io.Reader or directly off a byte slice with zero-copying.
+type decReader interface {
+ unreadn1()
+
+ // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR
+ // just return a view of the []byte being decoded from.
+ // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control.
+ readx(n int) []byte
+ readb([]byte)
+ readn1() uint8
+ readn1eof() (v uint8, eof bool)
+ numread() int // number of bytes read
+ track()
+ stopTrack() []byte
+}
+
+type decReaderByteScanner interface {
+ io.Reader
+ io.ByteScanner
+}
+
+type decDriver interface {
+ // this will check if the next token is a break.
+ CheckBreak() bool
+ TryDecodeAsNil() bool
+ // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known.
+ ContainerType() (vt valueType)
+ IsBuiltinType(rt uintptr) bool
+ DecodeBuiltin(rt uintptr, v interface{})
+
+ // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
+ // For maps and arrays, it will not do the decoding in-band, but will signal
+ // the decoder, so that is done later, by setting the decNaked.valueType field.
+ //
+ // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
+ // for extensions, DecodeNaked must read the tag and the []byte if it exists.
+ // if the []byte is not read, then kInterfaceNaked will treat it as a Handle
+ // that stores the subsequent value in-band, and complete reading the RawExt.
+ //
+ // extensions should also use readx to decode them, for efficiency.
+ // kInterface will extract the detached byte slice if it has to pass it outside its realm.
+ DecodeNaked()
+ DecodeInt(bitsize uint8) (i int64)
+ DecodeUint(bitsize uint8) (ui uint64)
+ DecodeFloat(chkOverflow32 bool) (f float64)
+ DecodeBool() (b bool)
+ // DecodeString can also decode symbols.
+ // It looks redundant as DecodeBytes is available.
+ // However, some codecs (e.g. binc) support symbols and can
+ // return a pre-stored string value, meaning that it can bypass
+ // the cost of []byte->string conversion.
+ DecodeString() (s string)
+
+ // DecodeBytes may be called directly, without going through reflection.
+ // Consequently, it must be designed to handle possible nil.
+ DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte)
+
+ // decodeExt will decode into a *RawExt or into an extension.
+ DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64)
+ // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
+ ReadMapStart() int
+ ReadArrayStart() int
+
+ reset()
+ uncacheRead()
+}
+
+type decNoSeparator struct{}
+
+func (_ decNoSeparator) ReadEnd() {}
+func (_ decNoSeparator) uncacheRead() {}
+
+type DecodeOptions struct {
+ // MapType specifies type to use during schema-less decoding of a map in the stream.
+ // If nil, we use map[interface{}]interface{}
+ MapType reflect.Type
+
+ // SliceType specifies type to use during schema-less decoding of an array in the stream.
+ // If nil, we use []interface{}
+ SliceType reflect.Type
+
+ // MaxInitLen defines the initial length that we "make" a collection (slice, chan or map) with.
+ // If 0 or negative, we default to a sensible value based on the size of an element in the collection.
+ //
+ // For example, when decoding, a stream may say that it has MAX_UINT elements.
+	// We should not automatically provision a slice of that length, to prevent an out-of-memory crash.
+ // Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
+ MaxInitLen int
+
+ // If ErrorIfNoField, return an error when decoding a map
+ // from a codec stream into a struct, and no matching struct field is found.
+ ErrorIfNoField bool
+
+ // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded.
+ // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array,
+ // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set).
+ ErrorIfNoArrayExpand bool
+
+ // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64).
+ SignedInteger bool
+
+ // MapValueReset controls how we decode into a map value.
+ //
+ // By default, we MAY retrieve the mapping for a key, and then decode into that.
+ // However, especially with big maps, that retrieval may be expensive and unnecessary
+ // if the stream already contains all that is necessary to recreate the value.
+ //
+ // If true, we will never retrieve the previous mapping,
+ // but rather decode into a new value and set that in the map.
+ //
+ // If false, we will retrieve the previous mapping if necessary e.g.
+ // the previous mapping is a pointer, or is a struct or array with pre-set state,
+ // or is an interface.
+ MapValueReset bool
+
+ // InterfaceReset controls how we decode into an interface.
+ //
+ // By default, when we see a field that is an interface{...},
+ // or a map with interface{...} value, we will attempt decoding into the
+ // "contained" value.
+ //
+ // However, this prevents us from reading a string into an interface{}
+ // that formerly contained a number.
+ //
+ // If true, we will decode into a new "blank" value, and set that in the interface.
+ // If false, we will decode into whatever is contained in the interface.
+ InterfaceReset bool
+
+ // InternString controls interning of strings during decoding.
+ //
+ // Some handles, e.g. json, typically will read map keys as strings.
+ // If the set of keys are finite, it may help reduce allocation to
+	// If the set of keys is finite, it may help reduce allocation to
+	// look them up from a map (rather than allocating them afresh).
+	// Note: Handles apply interning judiciously,
+	// so not everything will be interned.
+ InternString bool
+}
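+
+// A brief configuration sketch (editorial illustration): these options are reachable
+// directly on a concrete handle (they are embedded via BasicHandle in this package), e.g.
+//
+//	var h CborHandle
+//	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
+//	h.ErrorIfNoField = true
+//	h.MaxInitLen = 1024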
+
+// ------------------------------------
+
+// ioDecByteScanner implements Read(), ReadByte(...), UnreadByte(...) methods
+// of io.Reader, io.ByteScanner.
+type ioDecByteScanner struct {
+ r io.Reader
+ l byte // last byte
+ ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread
+ b [1]byte // tiny buffer for reading single bytes
+}
+
+func (z *ioDecByteScanner) Read(p []byte) (n int, err error) {
+ var firstByte bool
+ if z.ls == 1 {
+ z.ls = 2
+ p[0] = z.l
+ if len(p) == 1 {
+ n = 1
+ return
+ }
+ firstByte = true
+ p = p[1:]
+ }
+ n, err = z.r.Read(p)
+ if n > 0 {
+ if err == io.EOF && n == len(p) {
+ err = nil // read was successful, so postpone EOF (till next time)
+ }
+ z.l = p[n-1]
+ z.ls = 2
+ }
+ if firstByte {
+ n++
+ }
+ return
+}
+
+func (z *ioDecByteScanner) ReadByte() (c byte, err error) {
+ n, err := z.Read(z.b[:])
+ if n == 1 {
+ c = z.b[0]
+ if err == io.EOF {
+ err = nil // read was successful, so postpone EOF (till next time)
+ }
+ }
+ return
+}
+
+func (z *ioDecByteScanner) UnreadByte() (err error) {
+ x := z.ls
+ if x == 0 {
+ err = errors.New("cannot unread - nothing has been read")
+ } else if x == 1 {
+ err = errors.New("cannot unread - last byte has not been read")
+ } else if x == 2 {
+ z.ls = 1
+ }
+ return
+}
+
+// ioDecReader is a decReader that reads off an io.Reader
+type ioDecReader struct {
+ br decReaderByteScanner
+ // temp byte array re-used internally for efficiency during read.
+ // shares buffer with Decoder, so we keep size of struct within 8 words.
+ x *[scratchByteArrayLen]byte
+ bs ioDecByteScanner
+ n int // num read
+ tr []byte // tracking bytes read
+ trb bool
+}
+
+func (z *ioDecReader) numread() int {
+ return z.n
+}
+
+func (z *ioDecReader) readx(n int) (bs []byte) {
+ if n <= 0 {
+ return
+ }
+ if n < len(z.x) {
+ bs = z.x[:n]
+ } else {
+ bs = make([]byte, n)
+ }
+ if _, err := io.ReadAtLeast(z.br, bs, n); err != nil {
+ panic(err)
+ }
+ z.n += len(bs)
+ if z.trb {
+ z.tr = append(z.tr, bs...)
+ }
+ return
+}
+
+func (z *ioDecReader) readb(bs []byte) {
+ if len(bs) == 0 {
+ return
+ }
+ n, err := io.ReadAtLeast(z.br, bs, len(bs))
+ z.n += n
+ if err != nil {
+ panic(err)
+ }
+ if z.trb {
+ z.tr = append(z.tr, bs...)
+ }
+}
+
+func (z *ioDecReader) readn1() (b uint8) {
+ b, err := z.br.ReadByte()
+ if err != nil {
+ panic(err)
+ }
+ z.n++
+ if z.trb {
+ z.tr = append(z.tr, b)
+ }
+ return b
+}
+
+func (z *ioDecReader) readn1eof() (b uint8, eof bool) {
+ b, err := z.br.ReadByte()
+ if err == nil {
+ z.n++
+ if z.trb {
+ z.tr = append(z.tr, b)
+ }
+ } else if err == io.EOF {
+ eof = true
+ } else {
+ panic(err)
+ }
+ return
+}
+
+func (z *ioDecReader) unreadn1() {
+ err := z.br.UnreadByte()
+ if err != nil {
+ panic(err)
+ }
+ z.n--
+ if z.trb {
+ if l := len(z.tr) - 1; l >= 0 {
+ z.tr = z.tr[:l]
+ }
+ }
+}
+
+func (z *ioDecReader) track() {
+ if z.tr != nil {
+ z.tr = z.tr[:0]
+ }
+ z.trb = true
+}
+
+func (z *ioDecReader) stopTrack() (bs []byte) {
+ z.trb = false
+ return z.tr
+}
+
+// ------------------------------------
+
+var bytesDecReaderCannotUnreadErr = errors.New("cannot unread last byte read")
+
+// bytesDecReader is a decReader that reads off a byte slice with zero copying
+type bytesDecReader struct {
+ b []byte // data
+ c int // cursor
+ a int // available
+ t int // track start
+}
+
+func (z *bytesDecReader) reset(in []byte) {
+ z.b = in
+ z.a = len(in)
+ z.c = 0
+ z.t = 0
+}
+
+func (z *bytesDecReader) numread() int {
+ return z.c
+}
+
+func (z *bytesDecReader) unreadn1() {
+ if z.c == 0 || len(z.b) == 0 {
+ panic(bytesDecReaderCannotUnreadErr)
+ }
+ z.c--
+ z.a++
+ return
+}
+
+func (z *bytesDecReader) readx(n int) (bs []byte) {
+ // slicing from a non-constant start position is more expensive,
+ // as more computation is required to decipher the pointer start position.
+ // However, we do it only once, and it's better than reslicing both z.b and return value.
+
+ if n <= 0 {
+ } else if z.a == 0 {
+ panic(io.EOF)
+ } else if n > z.a {
+ panic(io.ErrUnexpectedEOF)
+ } else {
+ c0 := z.c
+ z.c = c0 + n
+ z.a = z.a - n
+ bs = z.b[c0:z.c]
+ }
+ return
+}
+
+func (z *bytesDecReader) readn1() (v uint8) {
+ if z.a == 0 {
+ panic(io.EOF)
+ }
+ v = z.b[z.c]
+ z.c++
+ z.a--
+ return
+}
+
+func (z *bytesDecReader) readn1eof() (v uint8, eof bool) {
+ if z.a == 0 {
+ eof = true
+ return
+ }
+ v = z.b[z.c]
+ z.c++
+ z.a--
+ return
+}
+
+func (z *bytesDecReader) readb(bs []byte) {
+ copy(bs, z.readx(len(bs)))
+}
+
+func (z *bytesDecReader) track() {
+ z.t = z.c
+}
+
+func (z *bytesDecReader) stopTrack() (bs []byte) {
+ return z.b[z.t:z.c]
+}
+
+// ------------------------------------
+
+type decFnInfo struct {
+ d *Decoder
+ ti *typeInfo
+ xfFn Ext
+ xfTag uint64
+ seq seqType
+}
+
+// ----------------------------------------
+
+type decFn struct {
+ i decFnInfo
+ f func(*decFnInfo, reflect.Value)
+}
+
+func (f *decFnInfo) builtin(rv reflect.Value) {
+ f.d.d.DecodeBuiltin(f.ti.rtid, rv.Addr().Interface())
+}
+
+func (f *decFnInfo) rawExt(rv reflect.Value) {
+ f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil)
+}
+
+func (f *decFnInfo) ext(rv reflect.Value) {
+ f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn)
+}
+
+func (f *decFnInfo) getValueForUnmarshalInterface(rv reflect.Value, indir int8) (v interface{}) {
+ if indir == -1 {
+ v = rv.Addr().Interface()
+ } else if indir == 0 {
+ v = rv.Interface()
+ } else {
+ for j := int8(0); j < indir; j++ {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ rv = rv.Elem()
+ }
+ v = rv.Interface()
+ }
+ return
+}
+
+func (f *decFnInfo) selferUnmarshal(rv reflect.Value) {
+ f.getValueForUnmarshalInterface(rv, f.ti.csIndir).(Selfer).CodecDecodeSelf(f.d)
+}
+
+func (f *decFnInfo) binaryUnmarshal(rv reflect.Value) {
+ bm := f.getValueForUnmarshalInterface(rv, f.ti.bunmIndir).(encoding.BinaryUnmarshaler)
+ xbs := f.d.d.DecodeBytes(nil, false, true)
+ if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (f *decFnInfo) textUnmarshal(rv reflect.Value) {
+ tm := f.getValueForUnmarshalInterface(rv, f.ti.tunmIndir).(encoding.TextUnmarshaler)
+ fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (f *decFnInfo) jsonUnmarshal(rv reflect.Value) {
+ tm := f.getValueForUnmarshalInterface(rv, f.ti.junmIndir).(jsonUnmarshaler)
+ // bs := f.d.d.DecodeBytes(f.d.b[:], true, true)
+ // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (f *decFnInfo) kErr(rv reflect.Value) {
+ f.d.errorf("no decoding function defined for kind %v", rv.Kind())
+}
+
+func (f *decFnInfo) kString(rv reflect.Value) {
+ rv.SetString(f.d.d.DecodeString())
+}
+
+func (f *decFnInfo) kBool(rv reflect.Value) {
+ rv.SetBool(f.d.d.DecodeBool())
+}
+
+func (f *decFnInfo) kInt(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(intBitsize))
+}
+
+func (f *decFnInfo) kInt64(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(64))
+}
+
+func (f *decFnInfo) kInt32(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(32))
+}
+
+func (f *decFnInfo) kInt8(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(8))
+}
+
+func (f *decFnInfo) kInt16(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(16))
+}
+
+func (f *decFnInfo) kFloat32(rv reflect.Value) {
+ rv.SetFloat(f.d.d.DecodeFloat(true))
+}
+
+func (f *decFnInfo) kFloat64(rv reflect.Value) {
+ rv.SetFloat(f.d.d.DecodeFloat(false))
+}
+
+func (f *decFnInfo) kUint8(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(8))
+}
+
+func (f *decFnInfo) kUint64(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(64))
+}
+
+func (f *decFnInfo) kUint(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(uintBitsize))
+}
+
+func (f *decFnInfo) kUintptr(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(uintBitsize))
+}
+
+func (f *decFnInfo) kUint32(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(32))
+}
+
+func (f *decFnInfo) kUint16(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(16))
+}
+
+// func (f *decFnInfo) kPtr(rv reflect.Value) {
+// debugf(">>>>>>> ??? decode kPtr called - shouldn't get called")
+// if rv.IsNil() {
+// rv.Set(reflect.New(rv.Type().Elem()))
+// }
+// f.d.decodeValue(rv.Elem())
+// }
+
+// var kIntfCtr uint64
+
+func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) {
+ // nil interface:
+	// use some heuristics to decode it appropriately
+ // based on the detected next value in the stream.
+ d := f.d
+ d.d.DecodeNaked()
+ n := &d.n
+ if n.v == valueTypeNil {
+ return
+ }
+ // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader).
+ // if num := f.ti.rt.NumMethod(); num > 0 {
+ if f.ti.numMeth > 0 {
+ d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth)
+ return
+ }
+ // var useRvn bool
+ switch n.v {
+ case valueTypeMap:
+ // if d.h.MapType == nil || d.h.MapType == mapIntfIntfTyp {
+ // } else if d.h.MapType == mapStrIntfTyp { // for json performance
+ // }
+ if d.mtid == 0 || d.mtid == mapIntfIntfTypId {
+ l := len(n.ms)
+ n.ms = append(n.ms, nil)
+ var v2 interface{} = &n.ms[l]
+ d.decode(v2)
+ rvn = reflect.ValueOf(v2).Elem()
+ n.ms = n.ms[:l]
+ } else if d.mtid == mapStrIntfTypId { // for json performance
+ l := len(n.ns)
+ n.ns = append(n.ns, nil)
+ var v2 interface{} = &n.ns[l]
+ d.decode(v2)
+ rvn = reflect.ValueOf(v2).Elem()
+ n.ns = n.ns[:l]
+ } else {
+ rvn = reflect.New(d.h.MapType).Elem()
+ d.decodeValue(rvn, nil)
+ }
+ case valueTypeArray:
+ // if d.h.SliceType == nil || d.h.SliceType == intfSliceTyp {
+ if d.stid == 0 || d.stid == intfSliceTypId {
+ l := len(n.ss)
+ n.ss = append(n.ss, nil)
+ var v2 interface{} = &n.ss[l]
+ d.decode(v2)
+ rvn = reflect.ValueOf(v2).Elem()
+ n.ss = n.ss[:l]
+ } else {
+ rvn = reflect.New(d.h.SliceType).Elem()
+ d.decodeValue(rvn, nil)
+ }
+ case valueTypeExt:
+ var v interface{}
+ tag, bytes := n.u, n.l // calling decode below might taint the values
+ if bytes == nil {
+ l := len(n.is)
+ n.is = append(n.is, nil)
+ v2 := &n.is[l]
+ d.decode(v2)
+ v = *v2
+ n.is = n.is[:l]
+ }
+ bfn := d.h.getExtForTag(tag)
+ if bfn == nil {
+ var re RawExt
+ re.Tag = tag
+ re.Data = detachZeroCopyBytes(d.bytes, nil, bytes)
+ rvn = reflect.ValueOf(re)
+ } else {
+ rvnA := reflect.New(bfn.rt)
+ rvn = rvnA.Elem()
+ if bytes != nil {
+ bfn.ext.ReadExt(rvnA.Interface(), bytes)
+ } else {
+ bfn.ext.UpdateExt(rvnA.Interface(), v)
+ }
+ }
+ case valueTypeNil:
+ // no-op
+ case valueTypeInt:
+ rvn = reflect.ValueOf(&n.i).Elem()
+ case valueTypeUint:
+ rvn = reflect.ValueOf(&n.u).Elem()
+ case valueTypeFloat:
+ rvn = reflect.ValueOf(&n.f).Elem()
+ case valueTypeBool:
+ rvn = reflect.ValueOf(&n.b).Elem()
+ case valueTypeString, valueTypeSymbol:
+ rvn = reflect.ValueOf(&n.s).Elem()
+ case valueTypeBytes:
+ rvn = reflect.ValueOf(&n.l).Elem()
+ case valueTypeTimestamp:
+ rvn = reflect.ValueOf(&n.t).Elem()
+ default:
+ panic(fmt.Errorf("kInterfaceNaked: unexpected valueType: %d", n.v))
+ }
+ return
+}
+
+func (f *decFnInfo) kInterface(rv reflect.Value) {
+ // debugf("\t===> kInterface")
+
+ // Note:
+ // A consequence of how kInterface works, is that
+ // if an interface already contains something, we try
+ // to decode into what was there before.
+ // We do not replace with a generic value (as got from decodeNaked).
+
+ var rvn reflect.Value
+ if rv.IsNil() {
+ rvn = f.kInterfaceNaked()
+ if rvn.IsValid() {
+ rv.Set(rvn)
+ }
+ } else if f.d.h.InterfaceReset {
+ rvn = f.kInterfaceNaked()
+ if rvn.IsValid() {
+ rv.Set(rvn)
+ } else {
+ // reset to zero value based on current type in there.
+ rv.Set(reflect.Zero(rv.Elem().Type()))
+ }
+ } else {
+ rvn = rv.Elem()
+ // Note: interface{} is settable, but underlying type may not be.
+ // Consequently, we have to set the reflect.Value directly.
+ // if underlying type is settable (e.g. ptr or interface),
+ // we just decode into it.
+ // Else we create a settable value, decode into it, and set on the interface.
+ if rvn.CanSet() {
+ f.d.decodeValue(rvn, nil)
+ } else {
+ rvn2 := reflect.New(rvn.Type()).Elem()
+ rvn2.Set(rvn)
+ f.d.decodeValue(rvn2, nil)
+ rv.Set(rvn2)
+ }
+ }
+}
+
+func (f *decFnInfo) kStruct(rv reflect.Value) {
+ fti := f.ti
+ d := f.d
+ dd := d.d
+ cr := d.cr
+ ctyp := dd.ContainerType()
+ if ctyp == valueTypeMap {
+ containerLen := dd.ReadMapStart()
+ if containerLen == 0 {
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return
+ }
+ tisfi := fti.sfi
+ hasLen := containerLen >= 0
+ if hasLen {
+ for j := 0; j < containerLen; j++ {
+ // rvkencname := dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true))
+ // rvksi := ti.getForEncName(rvkencname)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if k := fti.indexForEncName(rvkencname); k > -1 {
+ si := tisfi[k]
+ if dd.TryDecodeAsNil() {
+ si.setToZeroValue(rv)
+ } else {
+ d.decodeValue(si.field(rv, true), nil)
+ }
+ } else {
+ d.structFieldNotFound(-1, rvkencname)
+ }
+ }
+ } else {
+ for j := 0; !dd.CheckBreak(); j++ {
+ // rvkencname := dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true))
+ // rvksi := ti.getForEncName(rvkencname)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if k := fti.indexForEncName(rvkencname); k > -1 {
+ si := tisfi[k]
+ if dd.TryDecodeAsNil() {
+ si.setToZeroValue(rv)
+ } else {
+ d.decodeValue(si.field(rv, true), nil)
+ }
+ } else {
+ d.structFieldNotFound(-1, rvkencname)
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ } else if ctyp == valueTypeArray {
+ containerLen := dd.ReadArrayStart()
+ if containerLen == 0 {
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ return
+ }
+ // Not much gain from doing it two ways for array.
+ // Arrays are not used as much for structs.
+ hasLen := containerLen >= 0
+ for j, si := range fti.sfip {
+ if hasLen {
+ if j == containerLen {
+ break
+ }
+ } else if dd.CheckBreak() {
+ break
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ if dd.TryDecodeAsNil() {
+ si.setToZeroValue(rv)
+ } else {
+ d.decodeValue(si.field(rv, true), nil)
+ }
+ }
+ if containerLen > len(fti.sfip) {
+ // read remaining values and throw away
+ for j := len(fti.sfip); j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ d.structFieldNotFound(j, "")
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ } else {
+ f.d.error(onlyMapOrArrayCanDecodeIntoStructErr)
+ return
+ }
+}
+
+func (f *decFnInfo) kSlice(rv reflect.Value) {
+ // A slice can be set from a map or array in stream.
+ // This way, the order can be kept (as order is lost with map).
+ ti := f.ti
+ d := f.d
+ dd := d.d
+ rtelem0 := ti.rt.Elem()
+ ctyp := dd.ContainerType()
+ if ctyp == valueTypeBytes || ctyp == valueTypeString {
+ // you can only decode bytes or string in the stream into a slice or array of bytes
+ if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) {
+ f.d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt)
+ }
+ if f.seq == seqTypeChan {
+ bs2 := dd.DecodeBytes(nil, false, true)
+ ch := rv.Interface().(chan<- byte)
+ for _, b := range bs2 {
+ ch <- b
+ }
+ } else {
+ rvbs := rv.Bytes()
+ bs2 := dd.DecodeBytes(rvbs, false, false)
+ if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) {
+ if rv.CanSet() {
+ rv.SetBytes(bs2)
+ } else {
+ copy(rvbs, bs2)
+ }
+ }
+ }
+ return
+ }
+
+ // array := f.seq == seqTypeChan
+
+ slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map)
+
+ // // an array can never return a nil slice. so no need to check f.array here.
+ if containerLenS == 0 {
+ if f.seq == seqTypeSlice {
+ if rv.IsNil() {
+ rv.Set(reflect.MakeSlice(ti.rt, 0, 0))
+ } else {
+ rv.SetLen(0)
+ }
+ } else if f.seq == seqTypeChan {
+ if rv.IsNil() {
+ rv.Set(reflect.MakeChan(ti.rt, 0))
+ }
+ }
+ slh.End()
+ return
+ }
+
+ rtelem := rtelem0
+ for rtelem.Kind() == reflect.Ptr {
+ rtelem = rtelem.Elem()
+ }
+ fn := d.getDecFn(rtelem, true, true)
+
+ var rv0, rv9 reflect.Value
+ rv0 = rv
+ rvChanged := false
+
+ // for j := 0; j < containerLenS; j++ {
+ var rvlen int
+ if containerLenS > 0 { // hasLen
+ if f.seq == seqTypeChan {
+ if rv.IsNil() {
+ rvlen, _ = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size()))
+ rv.Set(reflect.MakeChan(ti.rt, rvlen))
+ }
+ // handle chan specially:
+ for j := 0; j < containerLenS; j++ {
+ rv9 = reflect.New(rtelem0).Elem()
+ slh.ElemContainerState(j)
+ d.decodeValue(rv9, fn)
+ rv.Send(rv9)
+ }
+ } else { // slice or array
+ var truncated bool // says len of sequence is not same as expected number of elements
+ numToRead := containerLenS // if truncated, reset numToRead
+
+ rvcap := rv.Cap()
+ rvlen = rv.Len()
+ if containerLenS > rvcap {
+ if f.seq == seqTypeArray {
+ d.arrayCannotExpand(rvlen, containerLenS)
+ } else {
+ oldRvlenGtZero := rvlen > 0
+ rvlen, truncated = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size()))
+ if truncated {
+ if rvlen <= rvcap {
+ rv.SetLen(rvlen)
+ } else {
+ rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
+ rvChanged = true
+ }
+ } else {
+ rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
+ rvChanged = true
+ }
+ if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) {
+ reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap)
+ }
+ rvcap = rvlen
+ }
+ numToRead = rvlen
+ } else if containerLenS != rvlen {
+ if f.seq == seqTypeSlice {
+ rv.SetLen(containerLenS)
+ rvlen = containerLenS
+ }
+ }
+ j := 0
+ // we read up to the numToRead
+ for ; j < numToRead; j++ {
+ slh.ElemContainerState(j)
+ d.decodeValue(rv.Index(j), fn)
+ }
+
+ // if slice, expand and read up to containerLenS (or EOF) iff truncated
+ // if array, swallow all the rest.
+
+ if f.seq == seqTypeArray {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ } else if truncated { // slice was truncated, as chan NOT in this block
+ for ; j < containerLenS; j++ {
+ rv = expandSliceValue(rv, 1)
+ rv9 = rv.Index(j)
+ if resetSliceElemToZeroValue {
+ rv9.Set(reflect.Zero(rtelem0))
+ }
+ slh.ElemContainerState(j)
+ d.decodeValue(rv9, fn)
+ }
+ }
+ }
+ } else {
+ rvlen = rv.Len()
+ j := 0
+ for ; !dd.CheckBreak(); j++ {
+ if f.seq == seqTypeChan {
+ slh.ElemContainerState(j)
+ rv9 = reflect.New(rtelem0).Elem()
+ d.decodeValue(rv9, fn)
+ rv.Send(rv9)
+ } else {
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= rvlen {
+ if f.seq == seqTypeArray {
+ d.arrayCannotExpand(rvlen, j+1)
+ decodeIntoBlank = true
+ } else { // if f.seq == seqTypeSlice
+ // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs
+ rv = expandSliceValue(rv, 1)
+ rv9 = rv.Index(j)
+ // rv.Index(rv.Len() - 1).Set(reflect.Zero(rtelem0))
+ if resetSliceElemToZeroValue {
+ rv9.Set(reflect.Zero(rtelem0))
+ }
+ rvlen++
+ rvChanged = true
+ }
+ } else { // slice or array
+ rv9 = rv.Index(j)
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else { // seqTypeSlice
+ d.decodeValue(rv9, fn)
+ }
+ }
+ }
+ if f.seq == seqTypeSlice {
+ if j < rvlen {
+ rv.SetLen(j)
+ } else if j == 0 && rv.IsNil() {
+ rv = reflect.MakeSlice(ti.rt, 0, 0)
+ rvChanged = true
+ }
+ }
+ }
+ slh.End()
+
+ if rvChanged {
+ rv0.Set(rv)
+ }
+}
+
+func (f *decFnInfo) kArray(rv reflect.Value) {
+ // f.d.decodeValue(rv.Slice(0, rv.Len()))
+ f.kSlice(rv.Slice(0, rv.Len()))
+}
+
+func (f *decFnInfo) kMap(rv reflect.Value) {
+ d := f.d
+ dd := d.d
+ containerLen := dd.ReadMapStart()
+ cr := d.cr
+ ti := f.ti
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(ti.rt))
+ }
+
+ if containerLen == 0 {
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return
+ }
+
+ ktype, vtype := ti.rt.Key(), ti.rt.Elem()
+ ktypeId := reflect.ValueOf(ktype).Pointer()
+ vtypeKind := vtype.Kind()
+ var keyFn, valFn *decFn
+ var xtyp reflect.Type
+ for xtyp = ktype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() {
+ }
+ keyFn = d.getDecFn(xtyp, true, true)
+ for xtyp = vtype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() {
+ }
+ valFn = d.getDecFn(xtyp, true, true)
+ var mapGet, mapSet bool
+ if !f.d.h.MapValueReset {
+ // if pointer, mapGet = true
+ // if interface, mapGet = true if !DecodeNakedAlways (else false)
+ // if builtin, mapGet = false
+ // else mapGet = true
+ if vtypeKind == reflect.Ptr {
+ mapGet = true
+ } else if vtypeKind == reflect.Interface {
+ if !f.d.h.InterfaceReset {
+ mapGet = true
+ }
+ } else if !isImmutableKind(vtypeKind) {
+ mapGet = true
+ }
+ }
+
+ var rvk, rvv, rvz reflect.Value
+
+ // for j := 0; j < containerLen; j++ {
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ rvk = reflect.New(ktype).Elem()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ d.decodeValue(rvk, keyFn)
+
+ // special case if a byte array.
+ if ktypeId == intfTypId {
+ rvk = rvk.Elem()
+ if rvk.Type() == uint8SliceTyp {
+ rvk = reflect.ValueOf(d.string(rvk.Bytes()))
+ }
+ }
+			mapSet = true // set to false if we do a get, and it's a pointer, and it exists
+ if mapGet {
+ rvv = rv.MapIndex(rvk)
+ if rvv.IsValid() {
+ if vtypeKind == reflect.Ptr {
+ mapSet = false
+ }
+ } else {
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ }
+ } else {
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ d.decodeValue(rvv, valFn)
+ if mapSet {
+ rv.SetMapIndex(rvk, rvv)
+ }
+ }
+ } else {
+ for j := 0; !dd.CheckBreak(); j++ {
+ rvk = reflect.New(ktype).Elem()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ d.decodeValue(rvk, keyFn)
+
+ // special case if a byte array.
+ if ktypeId == intfTypId {
+ rvk = rvk.Elem()
+ if rvk.Type() == uint8SliceTyp {
+ rvk = reflect.ValueOf(d.string(rvk.Bytes()))
+ }
+ }
+			mapSet = true // set to false if we do a get, and it's a pointer, and it exists
+ if mapGet {
+ rvv = rv.MapIndex(rvk)
+ if rvv.IsValid() {
+ if vtypeKind == reflect.Ptr {
+ mapSet = false
+ }
+ } else {
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ }
+ } else {
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ d.decodeValue(rvv, valFn)
+ if mapSet {
+ rv.SetMapIndex(rvk, rvv)
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+type decRtidFn struct {
+ rtid uintptr
+ fn decFn
+}
+
+// decNaked is used to keep track of the primitives decoded.
+// Without it, we would have to decode each primitive and wrap it
+// in an interface{}, causing an allocation.
+// In this model, the primitives are decoded in a "pseudo-atomic" fashion,
+// so we can rest assured that no other decoding happens while these
+// primitives are being decoded.
+//
+// maps and arrays are not handled by this mechanism.
+// However, RawExt is, and we accommodate extensions that decode
+// RawExt from DecodeNaked, but need to decode the value subsequently.
+// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat.
+//
+// However, decNaked also keeps some arrays of default maps and slices
+// used in DecodeNaked. This way, we can get a pointer to it
+// without causing a new heap allocation.
+//
+// kInterfaceNaked will ensure that there is no allocation for the common
+// uses.
+type decNaked struct {
+ // r RawExt // used for RawExt, uint, []byte.
+ u uint64
+ i int64
+ f float64
+ l []byte
+ s string
+ t time.Time
+ b bool
+ v valueType
+
+ // stacks for reducing allocation
+ is []interface{}
+ ms []map[interface{}]interface{}
+ ns []map[string]interface{}
+ ss [][]interface{}
+ // rs []RawExt
+
+	// keep arrays at the bottom? Chances are that they are not used much.
+ ia [4]interface{}
+ ma [4]map[interface{}]interface{}
+ na [4]map[string]interface{}
+ sa [4][]interface{}
+ // ra [2]RawExt
+}
+
+func (n *decNaked) reset() {
+ if n.ss != nil {
+ n.ss = n.ss[:0]
+ }
+ if n.is != nil {
+ n.is = n.is[:0]
+ }
+ if n.ms != nil {
+ n.ms = n.ms[:0]
+ }
+ if n.ns != nil {
+ n.ns = n.ns[:0]
+ }
+}
+
+// A Decoder reads and decodes an object from an input stream in the codec format.
+type Decoder struct {
+ // hopefully, reduce dereferencing cost by laying the decReader inside the Decoder.
+ // Try to put things that go together to fit within a cache line (8 words).
+
+ d decDriver
+ // NOTE: Decoder shouldn't call its read methods,
+ // as the handler MAY need to do some coordination.
+ r decReader
+ // sa [initCollectionCap]decRtidFn
+ h *BasicHandle
+ hh Handle
+
+ be bool // is binary encoding
+ bytes bool // is bytes reader
+ js bool // is json handle
+
+ rb bytesDecReader
+ ri ioDecReader
+ cr containerStateRecv
+
+ s []decRtidFn
+ f map[uintptr]*decFn
+
+ // _ uintptr // for alignment purposes, so next one starts from a cache line
+
+ // cache the mapTypeId and sliceTypeId for faster comparisons
+ mtid uintptr
+ stid uintptr
+
+ n decNaked
+ b [scratchByteArrayLen]byte
+ is map[string]string // used for interning strings
+}
+
+// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
+//
+// For efficiency, users are encouraged to pass in a memory-buffered reader
+// (eg bufio.Reader, bytes.Buffer).
+func NewDecoder(r io.Reader, h Handle) *Decoder {
+ d := newDecoder(h)
+ d.Reset(r)
+ return d
+}
+
+// NewDecoderBytes returns a Decoder which efficiently decodes directly
+// from a byte slice with zero copying.
+func NewDecoderBytes(in []byte, h Handle) *Decoder {
+ d := newDecoder(h)
+ d.ResetBytes(in)
+ return d
+}
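+
+// Usage sketch (illustrative only, not part of this package's API surface):
+// assuming an existing byte slice `data` and a JsonHandle (referenced elsewhere
+// in this file), decoding into a map might look like:
+//
+//   var v map[string]interface{}
+//   dec := NewDecoderBytes(data, new(JsonHandle))
+//   if err := dec.Decode(&v); err != nil {
+//       // handle the decode error
+//   }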
+
+func newDecoder(h Handle) *Decoder {
+ d := &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
+ n := &d.n
+ // n.rs = n.ra[:0]
+ n.ms = n.ma[:0]
+ n.is = n.ia[:0]
+ n.ns = n.na[:0]
+ n.ss = n.sa[:0]
+ _, d.js = h.(*JsonHandle)
+ if d.h.InternString {
+ d.is = make(map[string]string, 32)
+ }
+ d.d = h.newDecDriver(d)
+ d.cr, _ = d.d.(containerStateRecv)
+ // d.d = h.newDecDriver(decReaderT{true, &d.rb, &d.ri})
+ return d
+}
+
+func (d *Decoder) resetCommon() {
+ d.n.reset()
+ d.d.reset()
+ // reset all things which were cached from the Handle,
+ // but could be changed.
+ d.mtid, d.stid = 0, 0
+ if d.h.MapType != nil {
+ d.mtid = reflect.ValueOf(d.h.MapType).Pointer()
+ }
+ if d.h.SliceType != nil {
+ d.stid = reflect.ValueOf(d.h.SliceType).Pointer()
+ }
+}
+
+func (d *Decoder) Reset(r io.Reader) {
+ d.ri.x = &d.b
+ // d.s = d.sa[:0]
+ d.ri.bs.r = r
+ var ok bool
+ d.ri.br, ok = r.(decReaderByteScanner)
+ if !ok {
+ d.ri.br = &d.ri.bs
+ }
+ d.r = &d.ri
+ d.resetCommon()
+}
+
+func (d *Decoder) ResetBytes(in []byte) {
+ // d.s = d.sa[:0]
+ d.rb.reset(in)
+ d.r = &d.rb
+ d.resetCommon()
+}
+
+// func (d *Decoder) sendContainerState(c containerState) {
+// if d.cr != nil {
+// d.cr.sendContainerState(c)
+// }
+// }
+
+// Decode decodes the stream from reader and stores the result in the
+// value pointed to by v. v cannot be a nil pointer. v can also be
+// a reflect.Value of a pointer.
+//
+// Note that a pointer to a nil interface is not a nil pointer.
+// If you do not know what type of stream it is, pass in a pointer to a nil interface.
+// We will decode and store a value in that nil interface.
+//
+// Sample usages:
+// // Decoding into a non-nil typed value
+// var f float32
+// err = codec.NewDecoder(r, handle).Decode(&f)
+//
+// // Decoding into nil interface
+// var v interface{}
+// dec := codec.NewDecoder(r, handle)
+// err = dec.Decode(&v)
+//
+// When decoding into a nil interface{}, we will decode into an appropriate value based
+// on the contents of the stream:
+// - Numbers are decoded as float64, int64 or uint64.
+// - Other values are decoded appropriately depending on the type:
+// bool, string, []byte, time.Time, etc
+// - Extensions are decoded as RawExt (if no ext function registered for the tag)
+// Configurations exist on the Handle to override defaults
+// (e.g. for MapType, SliceType and how to decode raw bytes).
+//
+// When decoding into a non-nil interface{} value, the mode of decoding is based on the
+// type of the value. When a value is seen:
+// - If an extension is registered for it, call that extension function
+// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error
+// - Else decode it based on its reflect.Kind
+//
+// There are some special rules when decoding into containers (slice/array/map/struct).
+// Decode will typically use the stream contents to UPDATE the container.
+// - A map can be decoded from a stream map, by updating matching keys.
+// - A slice can be decoded from a stream array,
+// by updating the first n elements, where n is length of the stream.
+// - A slice can be decoded from a stream map, by decoding as if
+// it contains a sequence of key-value pairs.
+// - A struct can be decoded from a stream map, by updating matching fields.
+// - A struct can be decoded from a stream array,
+// by updating fields as they occur in the struct (by index).
+//
+// When decoding a stream map or array with length of 0 into a nil map or slice,
+// we reset the destination map or slice to a zero-length value.
+//
+// However, when decoding a stream nil, we reset the destination container
+// to its "zero" value (e.g. nil for slice/map, etc).
+//
+func (d *Decoder) Decode(v interface{}) (err error) {
+ defer panicToErr(&err)
+ d.decode(v)
+ return
+}
+
+// this is not a smart swallow, as it allocates objects and does unnecessary work.
+func (d *Decoder) swallowViaHammer() {
+ var blank interface{}
+ d.decodeValue(reflect.ValueOf(&blank).Elem(), nil)
+}
+
+func (d *Decoder) swallow() {
+ // smarter decode that just swallows the content
+ dd := d.d
+ if dd.TryDecodeAsNil() {
+ return
+ }
+ cr := d.cr
+ switch dd.ContainerType() {
+ case valueTypeMap:
+ containerLen := dd.ReadMapStart()
+ clenGtEqualZero := containerLen >= 0
+ for j := 0; ; j++ {
+ if clenGtEqualZero {
+ if j >= containerLen {
+ break
+ }
+ } else if dd.CheckBreak() {
+ break
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ d.swallow()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ d.swallow()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ case valueTypeArray:
+ containerLenS := dd.ReadArrayStart()
+ clenGtEqualZero := containerLenS >= 0
+ for j := 0; ; j++ {
+ if clenGtEqualZero {
+ if j >= containerLenS {
+ break
+ }
+ } else if dd.CheckBreak() {
+ break
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ d.swallow()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ case valueTypeBytes:
+ dd.DecodeBytes(d.b[:], false, true)
+ case valueTypeString:
+ dd.DecodeBytes(d.b[:], true, true)
+ // dd.DecodeStringAsBytes(d.b[:])
+ default:
+ // these are all primitives, which we can get from decodeNaked
+ // if RawExt using Value, complete the processing.
+ dd.DecodeNaked()
+ if n := &d.n; n.v == valueTypeExt && n.l == nil {
+ l := len(n.is)
+ n.is = append(n.is, nil)
+ v2 := &n.is[l]
+ d.decode(v2)
+ n.is = n.is[:l]
+ }
+ }
+}
+
+// MustDecode is like Decode, but panics if unable to Decode.
+// This provides insight to the code location that triggered the error.
+func (d *Decoder) MustDecode(v interface{}) {
+ d.decode(v)
+}
+
+func (d *Decoder) decode(iv interface{}) {
+ // if ics, ok := iv.(Selfer); ok {
+ // ics.CodecDecodeSelf(d)
+ // return
+ // }
+
+ if d.d.TryDecodeAsNil() {
+ switch v := iv.(type) {
+ case nil:
+ case *string:
+ *v = ""
+ case *bool:
+ *v = false
+ case *int:
+ *v = 0
+ case *int8:
+ *v = 0
+ case *int16:
+ *v = 0
+ case *int32:
+ *v = 0
+ case *int64:
+ *v = 0
+ case *uint:
+ *v = 0
+ case *uint8:
+ *v = 0
+ case *uint16:
+ *v = 0
+ case *uint32:
+ *v = 0
+ case *uint64:
+ *v = 0
+ case *float32:
+ *v = 0
+ case *float64:
+ *v = 0
+ case *[]uint8:
+ *v = nil
+ case reflect.Value:
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ d.errNotValidPtrValue(v)
+ }
+ // d.chkPtrValue(v)
+ v = v.Elem()
+ if v.IsValid() {
+ v.Set(reflect.Zero(v.Type()))
+ }
+ default:
+ rv := reflect.ValueOf(iv)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ d.errNotValidPtrValue(rv)
+ }
+ // d.chkPtrValue(rv)
+ rv = rv.Elem()
+ if rv.IsValid() {
+ rv.Set(reflect.Zero(rv.Type()))
+ }
+ }
+ return
+ }
+
+ switch v := iv.(type) {
+ case nil:
+ d.error(cannotDecodeIntoNilErr)
+ return
+
+ case Selfer:
+ v.CodecDecodeSelf(d)
+
+ case reflect.Value:
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ d.errNotValidPtrValue(v)
+ }
+ // d.chkPtrValue(v)
+ d.decodeValueNotNil(v.Elem(), nil)
+
+ case *string:
+
+ *v = d.d.DecodeString()
+ case *bool:
+ *v = d.d.DecodeBool()
+ case *int:
+ *v = int(d.d.DecodeInt(intBitsize))
+ case *int8:
+ *v = int8(d.d.DecodeInt(8))
+ case *int16:
+ *v = int16(d.d.DecodeInt(16))
+ case *int32:
+ *v = int32(d.d.DecodeInt(32))
+ case *int64:
+ *v = d.d.DecodeInt(64)
+ case *uint:
+ *v = uint(d.d.DecodeUint(uintBitsize))
+ case *uint8:
+ *v = uint8(d.d.DecodeUint(8))
+ case *uint16:
+ *v = uint16(d.d.DecodeUint(16))
+ case *uint32:
+ *v = uint32(d.d.DecodeUint(32))
+ case *uint64:
+ *v = d.d.DecodeUint(64)
+ case *float32:
+ *v = float32(d.d.DecodeFloat(true))
+ case *float64:
+ *v = d.d.DecodeFloat(false)
+ case *[]uint8:
+ *v = d.d.DecodeBytes(*v, false, false)
+
+ case *interface{}:
+ d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil)
+
+ default:
+ if !fastpathDecodeTypeSwitch(iv, d) {
+ d.decodeI(iv, true, false, false, false)
+ }
+ }
+}
+
+func (d *Decoder) preDecodeValue(rv reflect.Value, tryNil bool) (rv2 reflect.Value, proceed bool) {
+ if tryNil && d.d.TryDecodeAsNil() {
+ // No need to check if a ptr, recursively, to determine
+ // whether to set value to nil.
+ // Just always set the value to its zero value.
+ if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid
+ rv.Set(reflect.Zero(rv.Type()))
+ }
+ return
+ }
+
+ // If stream is not containing a nil value, then we can deref to the base
+ // non-pointer value, and decode into that.
+ for rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ rv = rv.Elem()
+ }
+ return rv, true
+}
+
+func (d *Decoder) decodeI(iv interface{}, checkPtr, tryNil, checkFastpath, checkCodecSelfer bool) {
+ rv := reflect.ValueOf(iv)
+ if checkPtr {
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ d.errNotValidPtrValue(rv)
+ }
+ // d.chkPtrValue(rv)
+ }
+ rv, proceed := d.preDecodeValue(rv, tryNil)
+ if proceed {
+ fn := d.getDecFn(rv.Type(), checkFastpath, checkCodecSelfer)
+ fn.f(&fn.i, rv)
+ }
+}
+
+func (d *Decoder) decodeValue(rv reflect.Value, fn *decFn) {
+ if rv, proceed := d.preDecodeValue(rv, true); proceed {
+ if fn == nil {
+ fn = d.getDecFn(rv.Type(), true, true)
+ }
+ fn.f(&fn.i, rv)
+ }
+}
+
+func (d *Decoder) decodeValueNotNil(rv reflect.Value, fn *decFn) {
+ if rv, proceed := d.preDecodeValue(rv, false); proceed {
+ if fn == nil {
+ fn = d.getDecFn(rv.Type(), true, true)
+ }
+ fn.f(&fn.i, rv)
+ }
+}
+
+func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *decFn) {
+ rtid := reflect.ValueOf(rt).Pointer()
+
+ // retrieve or register a focused function for this type,
+ // to eliminate the need to do the retrieval multiple times
+
+ // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) }
+ var ok bool
+ if useMapForCodecCache {
+ fn, ok = d.f[rtid]
+ } else {
+ for i := range d.s {
+ v := &(d.s[i])
+ if v.rtid == rtid {
+ fn, ok = &(v.fn), true
+ break
+ }
+ }
+ }
+ if ok {
+ return
+ }
+
+ if useMapForCodecCache {
+ if d.f == nil {
+ d.f = make(map[uintptr]*decFn, initCollectionCap)
+ }
+ fn = new(decFn)
+ d.f[rtid] = fn
+ } else {
+ if d.s == nil {
+ d.s = make([]decRtidFn, 0, initCollectionCap)
+ }
+ d.s = append(d.s, decRtidFn{rtid: rtid})
+ fn = &(d.s[len(d.s)-1]).fn
+ }
+
+ // debugf("\tCreating new dec fn for type: %v\n", rt)
+ ti := d.h.getTypeInfo(rtid, rt)
+ fi := &(fn.i)
+ fi.d = d
+ fi.ti = ti
+
+ // An extension can be registered for any type, regardless of the Kind
+ // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc.)
+ //
+ // We can't check if it's an extension byte here first, because the user may have
+ // registered a pointer or non-pointer type, meaning we may have to recurse first
+ // before matching a mapped type, even though the extension byte is already detected.
+ //
+ // NOTE: if decoding into a nil interface{}, we return a non-nil
+ // value even if the container registers a length of 0.
+ if checkCodecSelfer && ti.cs {
+ fn.f = (*decFnInfo).selferUnmarshal
+ } else if rtid == rawExtTypId {
+ fn.f = (*decFnInfo).rawExt
+ } else if d.d.IsBuiltinType(rtid) {
+ fn.f = (*decFnInfo).builtin
+ } else if xfFn := d.h.getExt(rtid); xfFn != nil {
+ fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
+ fn.f = (*decFnInfo).ext
+ } else if supportMarshalInterfaces && d.be && ti.bunm {
+ fn.f = (*decFnInfo).binaryUnmarshal
+ } else if supportMarshalInterfaces && !d.be && d.js && ti.junm {
+ // If JSON, we should check jsonUnmarshal before textUnmarshal
+ fn.f = (*decFnInfo).jsonUnmarshal
+ } else if supportMarshalInterfaces && !d.be && ti.tunm {
+ fn.f = (*decFnInfo).textUnmarshal
+ } else {
+ rk := rt.Kind()
+ if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
+ if rt.PkgPath() == "" {
+ if idx := fastpathAV.index(rtid); idx != -1 {
+ fn.f = fastpathAV[idx].decfn
+ }
+ } else {
+ // use the mapping for the underlying type, if one exists
+ ok = false
+ var rtu reflect.Type
+ if rk == reflect.Map {
+ rtu = reflect.MapOf(rt.Key(), rt.Elem())
+ } else {
+ rtu = reflect.SliceOf(rt.Elem())
+ }
+ rtuid := reflect.ValueOf(rtu).Pointer()
+ if idx := fastpathAV.index(rtuid); idx != -1 {
+ xfnf := fastpathAV[idx].decfn
+ xrt := fastpathAV[idx].rt
+ fn.f = func(xf *decFnInfo, xrv reflect.Value) {
+ // xfnf(xf, xrv.Convert(xrt))
+ xfnf(xf, xrv.Addr().Convert(reflect.PtrTo(xrt)).Elem())
+ }
+ }
+ }
+ }
+ if fn.f == nil {
+ switch rk {
+ case reflect.String:
+ fn.f = (*decFnInfo).kString
+ case reflect.Bool:
+ fn.f = (*decFnInfo).kBool
+ case reflect.Int:
+ fn.f = (*decFnInfo).kInt
+ case reflect.Int64:
+ fn.f = (*decFnInfo).kInt64
+ case reflect.Int32:
+ fn.f = (*decFnInfo).kInt32
+ case reflect.Int8:
+ fn.f = (*decFnInfo).kInt8
+ case reflect.Int16:
+ fn.f = (*decFnInfo).kInt16
+ case reflect.Float32:
+ fn.f = (*decFnInfo).kFloat32
+ case reflect.Float64:
+ fn.f = (*decFnInfo).kFloat64
+ case reflect.Uint8:
+ fn.f = (*decFnInfo).kUint8
+ case reflect.Uint64:
+ fn.f = (*decFnInfo).kUint64
+ case reflect.Uint:
+ fn.f = (*decFnInfo).kUint
+ case reflect.Uint32:
+ fn.f = (*decFnInfo).kUint32
+ case reflect.Uint16:
+ fn.f = (*decFnInfo).kUint16
+ // case reflect.Ptr:
+ // fn.f = (*decFnInfo).kPtr
+ case reflect.Uintptr:
+ fn.f = (*decFnInfo).kUintptr
+ case reflect.Interface:
+ fn.f = (*decFnInfo).kInterface
+ case reflect.Struct:
+ fn.f = (*decFnInfo).kStruct
+ case reflect.Chan:
+ fi.seq = seqTypeChan
+ fn.f = (*decFnInfo).kSlice
+ case reflect.Slice:
+ fi.seq = seqTypeSlice
+ fn.f = (*decFnInfo).kSlice
+ case reflect.Array:
+ fi.seq = seqTypeArray
+ fn.f = (*decFnInfo).kArray
+ case reflect.Map:
+ fn.f = (*decFnInfo).kMap
+ default:
+ fn.f = (*decFnInfo).kErr
+ }
+ }
+ }
+
+ return
+}
+
+func (d *Decoder) structFieldNotFound(index int, rvkencname string) {
+ if d.h.ErrorIfNoField {
+ if index >= 0 {
+ d.errorf("no matching struct field found when decoding stream array at index %v", index)
+ return
+ } else if rvkencname != "" {
+ d.errorf("no matching struct field found when decoding stream map with key %s", rvkencname)
+ return
+ }
+ }
+ d.swallow()
+}
+
+func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) {
+ if d.h.ErrorIfNoArrayExpand {
+ d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen)
+ }
+}
+
+func (d *Decoder) chkPtrValue(rv reflect.Value) {
+ // We can only decode into a non-nil pointer
+ if rv.Kind() == reflect.Ptr && !rv.IsNil() {
+ return
+ }
+ d.errNotValidPtrValue(rv)
+}
+
+func (d *Decoder) errNotValidPtrValue(rv reflect.Value) {
+ if !rv.IsValid() {
+ d.error(cannotDecodeIntoNilErr)
+ return
+ }
+ if !rv.CanInterface() {
+ d.errorf("cannot decode into a value without an interface: %v", rv)
+ return
+ }
+ rvi := rv.Interface()
+ d.errorf("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", rv.Kind(), rvi, rvi)
+}
+
+func (d *Decoder) error(err error) {
+ panic(err)
+}
+
+func (d *Decoder) errorf(format string, params ...interface{}) {
+ params2 := make([]interface{}, len(params)+1)
+ params2[0] = d.r.numread()
+ copy(params2[1:], params)
+ err := fmt.Errorf("[pos %d]: "+format, params2...)
+ panic(err)
+}
+
+func (d *Decoder) string(v []byte) (s string) {
+ if d.is != nil {
+ s, ok := d.is[string(v)] // no allocation here.
+ if !ok {
+ s = string(v)
+ d.is[s] = s
+ }
+ return s
+ }
+ return string(v) // don't return stringView, as we need a real string here.
+}
+
+func (d *Decoder) intern(s string) {
+ if d.is != nil {
+ d.is[s] = s
+ }
+}
+
+// nextValueBytes returns the next value in the stream as a set of bytes.
+func (d *Decoder) nextValueBytes() []byte {
+ d.d.uncacheRead()
+ d.r.track()
+ d.swallow()
+ return d.r.stopTrack()
+}
+
+// --------------------------------------------------
+
+// decSliceHelper assists when decoding into a slice, from a map or an array in the stream.
+// A slice can be set from a map or array in stream. This supports the MapBySlice interface.
+type decSliceHelper struct {
+ d *Decoder
+ // ct valueType
+ array bool
+}
+
+func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) {
+ dd := d.d
+ ctyp := dd.ContainerType()
+ if ctyp == valueTypeArray {
+ x.array = true
+ clen = dd.ReadArrayStart()
+ } else if ctyp == valueTypeMap {
+ clen = dd.ReadMapStart() * 2
+ } else {
+ d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp)
+ }
+ // x.ct = ctyp
+ x.d = d
+ return
+}
+
+func (x decSliceHelper) End() {
+ cr := x.d.cr
+ if cr == nil {
+ return
+ }
+ if x.array {
+ cr.sendContainerState(containerArrayEnd)
+ } else {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (x decSliceHelper) ElemContainerState(index int) {
+ cr := x.d.cr
+ if cr == nil {
+ return
+ }
+ if x.array {
+ cr.sendContainerState(containerArrayElem)
+ } else {
+ if index%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+}
+
+func decByteSlice(r decReader, clen int, bs []byte) (bsOut []byte) {
+ if clen == 0 {
+ return zeroByteSlice
+ }
+ if len(bs) == clen {
+ bsOut = bs
+ } else if cap(bs) >= clen {
+ bsOut = bs[:clen]
+ } else {
+ bsOut = make([]byte, clen)
+ }
+ r.readb(bsOut)
+ return
+}
+
+func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) {
+ if xlen := len(in); xlen > 0 {
+ if isBytesReader || xlen <= scratchByteArrayLen {
+ if cap(dest) >= xlen {
+ out = dest[:xlen]
+ } else {
+ out = make([]byte, xlen)
+ }
+ copy(out, in)
+ return
+ }
+ }
+ return in
+}
+
+// decInferLen will infer a sensible length, given the following:
+// - clen: length wanted.
+// - maxlen: max length to be returned.
+// if <= 0, it is unset, and we infer it based on the unit size
+// - unit: number of bytes for each element of the collection
+func decInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
+ // handle when maxlen is not set i.e. <= 0
+ if clen <= 0 {
+ return
+ }
+ if maxlen <= 0 {
+ // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items.
+ // maxlen = 256 * 1024 / unit
+ // if maxlen < (4 * 1024) {
+ // maxlen = 4 * 1024
+ // }
+ if unit < (256 / 4) {
+ maxlen = 256 * 1024 / unit
+ } else {
+ maxlen = 4 * 1024
+ }
+ }
+ if clen > maxlen {
+ rvlen = maxlen
+ truncated = true
+ } else {
+ rvlen = clen
+ }
+ return
+ // if clen <= 0 {
+ // rvlen = 0
+ // } else if maxlen > 0 && clen > maxlen {
+ // rvlen = maxlen
+ // truncated = true
+ // } else {
+ // rvlen = clen
+ // }
+ // return
+}
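+
+// Worked example (illustrative): with clen=1000000, maxlen=0 and unit=8,
+// maxlen is inferred as 256*1024/8 = 32768, so decInferLen returns
+// rvlen=32768 and truncated=true. With clen=100 and the same settings,
+// it returns rvlen=100 and truncated=false.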
+
+// // implement overall decReader wrapping both, for possible use inline:
+// type decReaderT struct {
+// bytes bool
+// rb *bytesDecReader
+// ri *ioDecReader
+// }
+//
+// // implement *Decoder as a decReader.
+// // Using decReaderT (defined just above) caused performance degradation
+// // possibly because of constant copying the value,
+// // and some value->interface conversion causing allocation.
+// func (d *Decoder) unreadn1() {
+// if d.bytes {
+// d.rb.unreadn1()
+// } else {
+// d.ri.unreadn1()
+// }
+// }
+// ... for other methods of decReader.
+// Testing showed that performance improvement was negligible.
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/encode.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/encode.go
new file mode 100644
index 0000000..a874c74
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/encode.go
@@ -0,0 +1,1419 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "sync"
+)
+
+const (
+ defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024
+)
+
+// AsSymbolFlag defines what should be encoded as symbols.
+type AsSymbolFlag uint8
+
+const (
+ // AsSymbolDefault is default.
+ // Currently, this means only encode struct field names as symbols.
+ // The default is subject to change.
+ AsSymbolDefault AsSymbolFlag = iota
+
+ // AsSymbolAll means encode anything which could be a symbol as a symbol.
+ AsSymbolAll = 0xfe
+
+ // AsSymbolNone means do not encode anything as a symbol.
+ AsSymbolNone = 1 << iota
+
+ // AsSymbolMapStringKeysFlag means encode keys in map[string]XXX as symbols.
+ AsSymbolMapStringKeysFlag
+
+ // AsSymbolStructFieldNameFlag means encode struct field names as symbols.
+ AsSymbolStructFieldNameFlag
+)
+
+// encWriter abstracts writing to a byte array or to an io.Writer.
+type encWriter interface {
+ writeb([]byte)
+ writestr(string)
+ writen1(byte)
+ writen2(byte, byte)
+ atEndOfEncode()
+}
+
+// encDriver abstracts the actual codec (binc vs msgpack, etc)
+type encDriver interface {
+ IsBuiltinType(rt uintptr) bool
+ EncodeBuiltin(rt uintptr, v interface{})
+ EncodeNil()
+ EncodeInt(i int64)
+ EncodeUint(i uint64)
+ EncodeBool(b bool)
+ EncodeFloat32(f float32)
+ EncodeFloat64(f float64)
+ // encodeExtPreamble(xtag byte, length int)
+ EncodeRawExt(re *RawExt, e *Encoder)
+ EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder)
+ EncodeArrayStart(length int)
+ EncodeMapStart(length int)
+ EncodeString(c charEncoding, v string)
+ EncodeSymbol(v string)
+ EncodeStringBytes(c charEncoding, v []byte)
+ //TODO
+ //encBignum(f *big.Int)
+ //encStringRunes(c charEncoding, v []rune)
+
+ reset()
+}
+
+type encDriverAsis interface {
+ EncodeAsis(v []byte)
+}
+
+type encNoSeparator struct{}
+
+func (_ encNoSeparator) EncodeEnd() {}
+
+type ioEncWriterWriter interface {
+ WriteByte(c byte) error
+ WriteString(s string) (n int, err error)
+ Write(p []byte) (n int, err error)
+}
+
+type ioEncStringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+type EncodeOptions struct {
+ // Encode a struct as an array, and not as a map
+ StructToArray bool
+
+ // Canonical representation means that encoding a value will always result in the same
+ // sequence of bytes.
+ //
+ // This only affects maps, as the iteration order for maps is random.
+ //
+ // The implementation MAY use the natural sort order for the map keys if possible:
+ //
+ // - If there is a natural sort order (i.e. for number, bool, string or []byte keys),
+ // then the map keys are first sorted in natural order and then written
+ // with corresponding map values to the stream.
+ // - If there is no natural sort order, then the map keys will first be
+ // encoded into []byte, and then sorted,
+ // before writing the sorted keys and the corresponding map values to the stream.
+ //
+ Canonical bool
+
+ // CheckCircularRef controls whether we check for circular references
+ // and error fast during an encode.
+ //
+ // If enabled, an error is received if a pointer to a struct
+ // references itself either directly or through one of its fields (iteratively).
+ //
+ // This is opt-in, as there may be a performance hit to checking circular references.
+ CheckCircularRef bool
+
+ // AsSymbols defines what should be encoded as symbols.
+ //
+ // Encoding as symbols can reduce the encoded size significantly.
+ //
+ // However, during encoding, each string to be encoded as a symbol must
+ // be checked to see if it has been seen before. Consequently, encoding time
+ // will increase when using symbols, because string comparisons have a clear cost.
+ //
+ // Sample values:
+ // AsSymbolNone
+ // AsSymbolAll
+ // AsSymbolMapStringKeysFlag
+ // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
+ AsSymbols AsSymbolFlag
+}
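+
+// Configuration sketch (an illustrative assumption, not taken from this file):
+// the references to e.h.Canonical and e.h.StructToArray below suggest these
+// options live on the BasicHandle underlying a concrete Handle, so canonical
+// output could be requested roughly as:
+//
+//   var h JsonHandle   // any Handle exposing EncodeOptions
+//   h.Canonical = true // sort map keys for deterministic output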
+
+// ---------------------------------------------
+
+type simpleIoEncWriterWriter struct {
+ w io.Writer
+ bw io.ByteWriter
+ sw ioEncStringWriter
+}
+
+func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) {
+ if o.bw != nil {
+ return o.bw.WriteByte(c)
+ }
+ _, err = o.w.Write([]byte{c})
+ return
+}
+
+func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) {
+ if o.sw != nil {
+ return o.sw.WriteString(s)
+ }
+ // return o.w.Write([]byte(s))
+ return o.w.Write(bytesView(s))
+}
+
+func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) {
+ return o.w.Write(p)
+}
+
+// ----------------------------------------
+
+// ioEncWriter implements encWriter and can write to an io.Writer implementation
+type ioEncWriter struct {
+ w ioEncWriterWriter
+ s simpleIoEncWriterWriter
+ // x [8]byte // temp byte array re-used internally for efficiency
+}
+
+func (z *ioEncWriter) writeb(bs []byte) {
+ if len(bs) == 0 {
+ return
+ }
+ n, err := z.w.Write(bs)
+ if err != nil {
+ panic(err)
+ }
+ if n != len(bs) {
+ panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n))
+ }
+}
+
+func (z *ioEncWriter) writestr(s string) {
+ n, err := z.w.WriteString(s)
+ if err != nil {
+ panic(err)
+ }
+ if n != len(s) {
+ panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n))
+ }
+}
+
+func (z *ioEncWriter) writen1(b byte) {
+ if err := z.w.WriteByte(b); err != nil {
+ panic(err)
+ }
+}
+
+func (z *ioEncWriter) writen2(b1 byte, b2 byte) {
+ z.writen1(b1)
+ z.writen1(b2)
+}
+
+func (z *ioEncWriter) atEndOfEncode() {}
+
+// ----------------------------------------
+
+// bytesEncWriter implements encWriter and can write to a byte slice.
+// It is used by the Marshal function.
+type bytesEncWriter struct {
+ b []byte
+ c int // cursor
+ out *[]byte // write out on atEndOfEncode
+}
+
+func (z *bytesEncWriter) writeb(s []byte) {
+ if len(s) > 0 {
+ c := z.grow(len(s))
+ copy(z.b[c:], s)
+ }
+}
+
+func (z *bytesEncWriter) writestr(s string) {
+ if len(s) > 0 {
+ c := z.grow(len(s))
+ copy(z.b[c:], s)
+ }
+}
+
+func (z *bytesEncWriter) writen1(b1 byte) {
+ c := z.grow(1)
+ z.b[c] = b1
+}
+
+func (z *bytesEncWriter) writen2(b1 byte, b2 byte) {
+ c := z.grow(2)
+ z.b[c] = b1
+ z.b[c+1] = b2
+}
+
+func (z *bytesEncWriter) atEndOfEncode() {
+ *(z.out) = z.b[:z.c]
+}
+
+func (z *bytesEncWriter) grow(n int) (oldcursor int) {
+ oldcursor = z.c
+ z.c = oldcursor + n
+ if z.c > len(z.b) {
+ if z.c > cap(z.b) {
+ // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls.
+ // bytes.Buffer model (2*cap + n): much better
+ // bs := make([]byte, 2*cap(z.b)+n)
+ bs := make([]byte, growCap(cap(z.b), 1, n))
+ copy(bs, z.b[:oldcursor])
+ z.b = bs
+ } else {
+ z.b = z.b[:cap(z.b)]
+ }
+ }
+ return
+}
+
+// ---------------------------------------------
+
+type encFnInfo struct {
+ e *Encoder
+ ti *typeInfo
+ xfFn Ext
+ xfTag uint64
+ seq seqType
+}
+
+func (f *encFnInfo) builtin(rv reflect.Value) {
+ f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface())
+}
+
+func (f *encFnInfo) rawExt(rv reflect.Value) {
+ // rev := rv.Interface().(RawExt)
+ // f.e.e.EncodeRawExt(&rev, f.e)
+ var re *RawExt
+ if rv.CanAddr() {
+ re = rv.Addr().Interface().(*RawExt)
+ } else {
+ rev := rv.Interface().(RawExt)
+ re = &rev
+ }
+ f.e.e.EncodeRawExt(re, f.e)
+}
+
+func (f *encFnInfo) ext(rv reflect.Value) {
+ // if this is a struct|array and it was addressable, then pass the address directly (not the value)
+ if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() {
+ rv = rv.Addr()
+ }
+ f.e.e.EncodeExt(rv.Interface(), f.xfTag, f.xfFn, f.e)
+}
+
+func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) {
+ if indir == 0 {
+ v = rv.Interface()
+ } else if indir == -1 {
+ // If a non-pointer was passed to Encode(), then that value is not addressable.
+ // Take the address if addressable, else copy the value to an addressable value.
+ if rv.CanAddr() {
+ v = rv.Addr().Interface()
+ } else {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ // fmt.Printf("rv.Type: %v, rv2.Type: %v, v: %v\n", rv.Type(), rv2.Type(), v)
+ }
+ } else {
+ for j := int8(0); j < indir; j++ {
+ if rv.IsNil() {
+ f.e.e.EncodeNil()
+ return
+ }
+ rv = rv.Elem()
+ }
+ v = rv.Interface()
+ }
+ return v, true
+}
+
+func (f *encFnInfo) selferMarshal(rv reflect.Value) {
+ if v, proceed := f.getValueForMarshalInterface(rv, f.ti.csIndir); proceed {
+ v.(Selfer).CodecEncodeSelf(f.e)
+ }
+}
+
+func (f *encFnInfo) binaryMarshal(rv reflect.Value) {
+ if v, proceed := f.getValueForMarshalInterface(rv, f.ti.bmIndir); proceed {
+ bs, fnerr := v.(encoding.BinaryMarshaler).MarshalBinary()
+ f.e.marshal(bs, fnerr, false, c_RAW)
+ }
+}
+
+func (f *encFnInfo) textMarshal(rv reflect.Value) {
+ if v, proceed := f.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed {
+ // debugf(">>>> encoding.TextMarshaler: %T", rv.Interface())
+ bs, fnerr := v.(encoding.TextMarshaler).MarshalText()
+ f.e.marshal(bs, fnerr, false, c_UTF8)
+ }
+}
+
+func (f *encFnInfo) jsonMarshal(rv reflect.Value) {
+ if v, proceed := f.getValueForMarshalInterface(rv, f.ti.jmIndir); proceed {
+ bs, fnerr := v.(jsonMarshaler).MarshalJSON()
+ f.e.marshal(bs, fnerr, true, c_UTF8)
+ }
+}
+
+func (f *encFnInfo) kBool(rv reflect.Value) {
+ f.e.e.EncodeBool(rv.Bool())
+}
+
+func (f *encFnInfo) kString(rv reflect.Value) {
+ f.e.e.EncodeString(c_UTF8, rv.String())
+}
+
+func (f *encFnInfo) kFloat64(rv reflect.Value) {
+ f.e.e.EncodeFloat64(rv.Float())
+}
+
+func (f *encFnInfo) kFloat32(rv reflect.Value) {
+ f.e.e.EncodeFloat32(float32(rv.Float()))
+}
+
+func (f *encFnInfo) kInt(rv reflect.Value) {
+ f.e.e.EncodeInt(rv.Int())
+}
+
+func (f *encFnInfo) kUint(rv reflect.Value) {
+ f.e.e.EncodeUint(rv.Uint())
+}
+
+func (f *encFnInfo) kInvalid(rv reflect.Value) {
+ f.e.e.EncodeNil()
+}
+
+func (f *encFnInfo) kErr(rv reflect.Value) {
+ f.e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv)
+}
+
+func (f *encFnInfo) kSlice(rv reflect.Value) {
+ ti := f.ti
+ // array may be non-addressable, so we have to manage with care
+ // (don't call rv.Bytes, rv.Slice, etc).
+ // E.g. type S struct { B [2]byte };
+ // Encode(S{}) will bomb on "panic: slice of unaddressable array".
+ e := f.e
+ if f.seq != seqTypeArray {
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ // If in this method, then there was no extension function defined.
+ // So it's okay to treat as []byte.
+ if ti.rtid == uint8SliceTypId {
+ e.e.EncodeStringBytes(c_RAW, rv.Bytes())
+ return
+ }
+ }
+ cr := e.cr
+ rtelem := ti.rt.Elem()
+ l := rv.Len()
+ if ti.rtid == uint8SliceTypId || rtelem.Kind() == reflect.Uint8 {
+ switch f.seq {
+ case seqTypeArray:
+ // if l == 0 { e.e.encodeStringBytes(c_RAW, nil) } else
+ if rv.CanAddr() {
+ e.e.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes())
+ } else {
+ var bs []byte
+ if l <= cap(e.b) {
+ bs = e.b[:l]
+ } else {
+ bs = make([]byte, l)
+ }
+ reflect.Copy(reflect.ValueOf(bs), rv)
+ // TODO: Test that reflect.Copy works instead of manual one-by-one
+ // for i := 0; i < l; i++ {
+ // bs[i] = byte(rv.Index(i).Uint())
+ // }
+ e.e.EncodeStringBytes(c_RAW, bs)
+ }
+ case seqTypeSlice:
+ e.e.EncodeStringBytes(c_RAW, rv.Bytes())
+ case seqTypeChan:
+ bs := e.b[:0]
+ // do not use range, so that the number of elements encoded
+ // does not change, and encoding does not hang waiting on someone to close chan.
+ // for b := range rv.Interface().(<-chan byte) {
+ // bs = append(bs, b)
+ // }
+ ch := rv.Interface().(<-chan byte)
+ for i := 0; i < l; i++ {
+ bs = append(bs, <-ch)
+ }
+ e.e.EncodeStringBytes(c_RAW, bs)
+ }
+ return
+ }
+
+ if ti.mbs {
+ if l%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", l)
+ return
+ }
+ e.e.EncodeMapStart(l / 2)
+ } else {
+ e.e.EncodeArrayStart(l)
+ }
+
+ if l > 0 {
+ for rtelem.Kind() == reflect.Ptr {
+ rtelem = rtelem.Elem()
+ }
+ // if kind is reflect.Interface, do not pre-determine the
+ // encoding type, because preEncodeValue may break it down to
+ // a concrete type and kInterface will bomb.
+ var fn *encFn
+ if rtelem.Kind() != reflect.Interface {
+ rtelemid := reflect.ValueOf(rtelem).Pointer()
+ fn = e.getEncFn(rtelemid, rtelem, true, true)
+ }
+ // TODO: Consider perf implication of encoding odd index values as symbols if type is string
+ for j := 0; j < l; j++ {
+ if cr != nil {
+ if ti.mbs {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ } else {
+ cr.sendContainerState(containerArrayElem)
+ }
+ }
+ if f.seq == seqTypeChan {
+ if rv2, ok2 := rv.Recv(); ok2 {
+ e.encodeValue(rv2, fn)
+ } else {
+ e.encode(nil) // WE HAVE TO DO SOMETHING, so nil if nothing received.
+ }
+ } else {
+ e.encodeValue(rv.Index(j), fn)
+ }
+ }
+ }
+
+ if cr != nil {
+ if ti.mbs {
+ cr.sendContainerState(containerMapEnd)
+ } else {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ }
+}
+
+func (f *encFnInfo) kStruct(rv reflect.Value) {
+ fti := f.ti
+ e := f.e
+ cr := e.cr
+ tisfi := fti.sfip
+ toMap := !(fti.toArray || e.h.StructToArray)
+ newlen := len(fti.sfi)
+
+ // Use sync.Pool to reduce allocating slices unnecessarily.
+ // The cost of sync.Pool is less than the cost of new allocation.
+ pool, poolv, fkvs := encStructPoolGet(newlen)
+
+ // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct)
+ if toMap {
+ tisfi = fti.sfi
+ }
+ newlen = 0
+ var kv stringRv
+ for _, si := range tisfi {
+ kv.r = si.field(rv, false)
+ if toMap {
+ if si.omitEmpty && isEmptyValue(kv.r) {
+ continue
+ }
+ kv.v = si.encName
+ } else {
+ // use the zero value.
+ // if a reference or struct, set to nil (so you do not output too much)
+ if si.omitEmpty && isEmptyValue(kv.r) {
+ switch kv.r.Kind() {
+ case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array,
+ reflect.Map, reflect.Slice:
+ kv.r = reflect.Value{} //encode as nil
+ }
+ }
+ }
+ fkvs[newlen] = kv
+ newlen++
+ }
+
+ // debugf(">>>> kStruct: newlen: %v", newlen)
+ // sep := !e.be
+ ee := e.e // don't dereference every time
+
+ if toMap {
+ ee.EncodeMapStart(newlen)
+ // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
+ asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
+ for j := 0; j < newlen; j++ {
+ kv = fkvs[j]
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(kv.v)
+ } else {
+ ee.EncodeString(c_UTF8, kv.v)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(kv.r, nil)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ } else {
+ ee.EncodeArrayStart(newlen)
+ for j := 0; j < newlen; j++ {
+ kv = fkvs[j]
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ e.encodeValue(kv.r, nil)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ }
+
+ // do not use defer. Instead, use explicit pool return at end of function.
+ // defer has a cost we are trying to avoid.
+ // If there is a panic and these slices are not returned, it is ok.
+ if pool != nil {
+ pool.Put(poolv)
+ }
+}
+
+// func (f *encFnInfo) kPtr(rv reflect.Value) {
+// debugf(">>>>>>> ??? encode kPtr called - shouldn't get called")
+// if rv.IsNil() {
+// f.e.e.encodeNil()
+// return
+// }
+// f.e.encodeValue(rv.Elem())
+// }
+
+// func (f *encFnInfo) kInterface(rv reflect.Value) {
+// println("kInterface called")
+// debug.PrintStack()
+// if rv.IsNil() {
+// f.e.e.EncodeNil()
+// return
+// }
+// f.e.encodeValue(rv.Elem(), nil)
+// }
+
+func (f *encFnInfo) kMap(rv reflect.Value) {
+ ee := f.e.e
+ if rv.IsNil() {
+ ee.EncodeNil()
+ return
+ }
+
+ l := rv.Len()
+ ee.EncodeMapStart(l)
+ e := f.e
+ cr := e.cr
+ if l == 0 {
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return
+ }
+ var asSymbols bool
+ // determine the underlying key and val encFn's for the map.
+ // This eliminates some work which is done for each loop iteration i.e.
+ // rv.Type(), reflect.ValueOf(rt).Pointer(), then check map/list for fn.
+ //
+ // However, if kind is reflect.Interface, do not pre-determine the
+ // encoding type, because preEncodeValue may break it down to
+ // a concrete type and kInterface will bomb.
+ var keyFn, valFn *encFn
+ ti := f.ti
+ rtkey := ti.rt.Key()
+ rtval := ti.rt.Elem()
+ rtkeyid := reflect.ValueOf(rtkey).Pointer()
+ // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String
+ var keyTypeIsString = rtkeyid == stringTypId
+ if keyTypeIsString {
+ asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ } else {
+ for rtkey.Kind() == reflect.Ptr {
+ rtkey = rtkey.Elem()
+ }
+ if rtkey.Kind() != reflect.Interface {
+ rtkeyid = reflect.ValueOf(rtkey).Pointer()
+ keyFn = e.getEncFn(rtkeyid, rtkey, true, true)
+ }
+ }
+ for rtval.Kind() == reflect.Ptr {
+ rtval = rtval.Elem()
+ }
+ if rtval.Kind() != reflect.Interface {
+ rtvalid := reflect.ValueOf(rtval).Pointer()
+ valFn = e.getEncFn(rtvalid, rtval, true, true)
+ }
+ mks := rv.MapKeys()
+ // for j, lmks := 0, len(mks); j < lmks; j++ {
+
+ if e.h.Canonical {
+ e.kMapCanonical(rtkeyid, rtkey, rv, mks, valFn, asSymbols)
+ } else {
+ for j := range mks {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if keyTypeIsString {
+ if asSymbols {
+ ee.EncodeSymbol(mks[j].String())
+ } else {
+ ee.EncodeString(c_UTF8, mks[j].String())
+ }
+ } else {
+ e.encodeValue(mks[j], keyFn)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mks[j]), valFn)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *encFn, asSymbols bool) {
+ ee := e.e
+ cr := e.cr
+ // we previously did out-of-band if an extension was registered.
+ // This is not necessary, as the natural kind is sufficient for ordering.
+
+ if rtkeyid == uint8SliceTypId {
+ mksv := make([]bytesRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Bytes()
+ }
+ sort.Sort(bytesRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeStringBytes(c_RAW, mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ } else {
+ switch rtkey.Kind() {
+ case reflect.Bool:
+ mksv := make([]boolRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Bool()
+ }
+ sort.Sort(boolRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.String:
+ mksv := make([]stringRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.String()
+ }
+ sort.Sort(stringRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(mksv[i].v)
+ } else {
+ ee.EncodeString(c_UTF8, mksv[i].v)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr:
+ mksv := make([]uintRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Uint()
+ }
+ sort.Sort(uintRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ mksv := make([]intRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Int()
+ }
+ sort.Sort(intRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.Float32:
+ mksv := make([]floatRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Float()
+ }
+ sort.Sort(floatRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(mksv[i].v))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.Float64:
+ mksv := make([]floatRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Float()
+ }
+ sort.Sort(floatRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ default:
+ // out-of-band
+ // first encode each key to a []byte, then sort them, then record
+ var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ mksbv := make([]bytesRv, len(mks))
+ for i, k := range mks {
+ v := &mksbv[i]
+ l := len(mksv)
+ e2.MustEncode(k)
+ v.r = k
+ v.v = mksv[l:]
+ // fmt.Printf(">>>>> %s\n", mksv[l:])
+ }
+ sort.Sort(bytesRvSlice(mksbv))
+ for j := range mksbv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(mksbv[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksbv[j].r), valFn)
+ }
+ }
+ }
+}
+
+// --------------------------------------------------
+
+// encFn encapsulates the captured variables and the encode function.
+// This way, we only do some calculations once, and pass them to the
+// code block that should be called (encapsulated in a function)
+// instead of executing the checks every time.
+type encFn struct {
+ i encFnInfo
+ f func(*encFnInfo, reflect.Value)
+}
+
+// --------------------------------------------------
+
+type encRtidFn struct {
+ rtid uintptr
+ fn encFn
+}
+
+// An Encoder writes an object to an output stream in the codec format.
+type Encoder struct {
+ // hopefully, reduce dereferencing cost by laying the encWriter inside the Encoder
+ e encDriver
+ // NOTE: Encoder shouldn't call its write methods,
+ // as the handler MAY need to do some coordination.
+ w encWriter
+ s []encRtidFn
+ ci set
+ be bool // is binary encoding
+ js bool // is json handle
+
+ wi ioEncWriter
+ wb bytesEncWriter
+
+ h *BasicHandle
+ hh Handle
+
+ cr containerStateRecv
+ as encDriverAsis
+
+ f map[uintptr]*encFn
+ b [scratchByteArrayLen]byte
+}
+
+// NewEncoder returns an Encoder for encoding into an io.Writer.
+//
+// For efficiency, users are encouraged to pass in a memory-buffered writer
+// (eg bufio.Writer, bytes.Buffer).
+func NewEncoder(w io.Writer, h Handle) *Encoder {
+ e := newEncoder(h)
+ e.Reset(w)
+ return e
+}
+
+// NewEncoderBytes returns an encoder for encoding directly and efficiently
+// into a byte slice, using zero-copying to temporary slices.
+//
+// It will potentially replace the output byte slice pointed to.
+// After encoding, the out parameter contains the encoded contents.
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
+ e := newEncoder(h)
+ e.ResetBytes(out)
+ return e
+}
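+
+// Usage sketch (illustrative only; the handle choice and the value being
+// encoded are assumptions, not part of this file):
+//
+//   var out []byte
+//   enc := NewEncoderBytes(&out, new(JsonHandle))
+//   if err := enc.Encode(map[string]int{"a": 1}); err != nil {
+//       // handle the encode error
+//   }
+//   // out now holds the encoded bytes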
+
+func newEncoder(h Handle) *Encoder {
+ e := &Encoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
+ _, e.js = h.(*JsonHandle)
+ e.e = h.newEncDriver(e)
+ e.as, _ = e.e.(encDriverAsis)
+ e.cr, _ = e.e.(containerStateRecv)
+ return e
+}
+
+// Reset the Encoder with a new output stream.
+//
+// This accommodates reusing the state of the Encoder,
+// where it has "cached" information about sub-engines.
+func (e *Encoder) Reset(w io.Writer) {
+ ww, ok := w.(ioEncWriterWriter)
+ if ok {
+ e.wi.w = ww
+ } else {
+ sww := &e.wi.s
+ sww.w = w
+ sww.bw, _ = w.(io.ByteWriter)
+ sww.sw, _ = w.(ioEncStringWriter)
+ e.wi.w = sww
+ //ww = bufio.NewWriterSize(w, defEncByteBufSize)
+ }
+ e.w = &e.wi
+ e.e.reset()
+}
+
+func (e *Encoder) ResetBytes(out *[]byte) {
+ in := *out
+ if in == nil {
+ in = make([]byte, defEncByteBufSize)
+ }
+ e.wb.b, e.wb.out, e.wb.c = in, out, 0
+ e.w = &e.wb
+ e.e.reset()
+}
+
+// func (e *Encoder) sendContainerState(c containerState) {
+// if e.cr != nil {
+// e.cr.sendContainerState(c)
+// }
+// }
+
+// Encode writes an object into a stream.
+//
+// Encoding can be configured via the struct tag for the fields.
+// The "codec" key in struct field's tag value is the key name,
+// followed by an optional comma and options.
+// Note that the "json" key is used in the absence of the "codec" key.
+//
+// To set an option on all fields (e.g. omitempty on all fields), you
+// can create a field called _struct, and set flags on it.
+//
+// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
+// - the field's tag is "-", OR
+// - the field is empty (as defined below) and its tag specifies the "omitempty" option.
+//
+// When encoding as a map, the first string in the tag (before the comma)
+// is the map key string to use when encoding.
+//
+// However, struct values may encode as arrays. This happens when:
+// - StructToArray Encode option is set, OR
+// - the tag on the _struct field sets the "toarray" option
+//
+// Values with types that implement MapBySlice are encoded as stream maps.
+//
+// The empty values (for omitempty option) are false, 0, any nil pointer
+// or interface value, and any array, slice, map, or string of length zero.
+//
+// Anonymous fields are encoded inline except:
+// - the struct tag specifies a replacement name (first value)
+// - the field is of an interface type
+//
+// Examples:
+//
+// // NOTE: 'json:' can be used as the struct tag key, in place of 'codec:' below.
+// type MyStruct struct {
+// _struct bool `codec:",omitempty"` //set omitempty for every field
+// Field1 string `codec:"-"` //skip this field
+// Field2 int `codec:"myName"` //Use key "myName" in encode stream
+// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty.
+// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty.
+// io.Reader //use key "Reader".
+// MyStruct `codec:"my1"` //use key "my1".
+// MyStruct //inline it
+// ...
+// }
+//
+// type MyStruct struct {
+// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field
+// //and encode struct as an array
+// }
+//
+// The mode of encoding is based on the type of the value. When a value is seen:
+// - If a Selfer, call its CodecEncodeSelf method
+// - If an extension is registered for it, call that extension function
+// - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method
+// - Else encode it based on its reflect.Kind
+//
+// Note that struct field names and keys in map[string]XXX will be treated as symbols.
+// Some formats support symbols (e.g. binc) and will properly encode the string
+// only once in the stream, and use a tag to refer to it thereafter.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer panicToErr(&err)
+ e.encode(v)
+ e.w.atEndOfEncode()
+ return
+}
+
+// MustEncode is like Encode, but panics if unable to Encode.
+// This provides insight to the code location that triggered the error.
+func (e *Encoder) MustEncode(v interface{}) {
+ e.encode(v)
+ e.w.atEndOfEncode()
+}
+
+// comment out these (Must)Write methods. They were only put there to support cbor.
+// However, users already have access to the streams, and can write directly.
+//
+// // Write allows users write to the Encoder stream directly.
+// func (e *Encoder) Write(bs []byte) (err error) {
+// defer panicToErr(&err)
+// e.w.writeb(bs)
+// return
+// }
+// // MustWrite is like write, but panics if unable to Write.
+// func (e *Encoder) MustWrite(bs []byte) {
+// e.w.writeb(bs)
+// }
+
+func (e *Encoder) encode(iv interface{}) {
+ // if ics, ok := iv.(Selfer); ok {
+ // ics.CodecEncodeSelf(e)
+ // return
+ // }
+
+ switch v := iv.(type) {
+ case nil:
+ e.e.EncodeNil()
+ case Selfer:
+ v.CodecEncodeSelf(e)
+
+ case reflect.Value:
+ e.encodeValue(v, nil)
+
+ case string:
+ e.e.EncodeString(c_UTF8, v)
+ case bool:
+ e.e.EncodeBool(v)
+ case int:
+ e.e.EncodeInt(int64(v))
+ case int8:
+ e.e.EncodeInt(int64(v))
+ case int16:
+ e.e.EncodeInt(int64(v))
+ case int32:
+ e.e.EncodeInt(int64(v))
+ case int64:
+ e.e.EncodeInt(v)
+ case uint:
+ e.e.EncodeUint(uint64(v))
+ case uint8:
+ e.e.EncodeUint(uint64(v))
+ case uint16:
+ e.e.EncodeUint(uint64(v))
+ case uint32:
+ e.e.EncodeUint(uint64(v))
+ case uint64:
+ e.e.EncodeUint(v)
+ case float32:
+ e.e.EncodeFloat32(v)
+ case float64:
+ e.e.EncodeFloat64(v)
+
+ case []uint8:
+ e.e.EncodeStringBytes(c_RAW, v)
+
+ case *string:
+ e.e.EncodeString(c_UTF8, *v)
+ case *bool:
+ e.e.EncodeBool(*v)
+ case *int:
+ e.e.EncodeInt(int64(*v))
+ case *int8:
+ e.e.EncodeInt(int64(*v))
+ case *int16:
+ e.e.EncodeInt(int64(*v))
+ case *int32:
+ e.e.EncodeInt(int64(*v))
+ case *int64:
+ e.e.EncodeInt(*v)
+ case *uint:
+ e.e.EncodeUint(uint64(*v))
+ case *uint8:
+ e.e.EncodeUint(uint64(*v))
+ case *uint16:
+ e.e.EncodeUint(uint64(*v))
+ case *uint32:
+ e.e.EncodeUint(uint64(*v))
+ case *uint64:
+ e.e.EncodeUint(*v)
+ case *float32:
+ e.e.EncodeFloat32(*v)
+ case *float64:
+ e.e.EncodeFloat64(*v)
+
+ case *[]uint8:
+ e.e.EncodeStringBytes(c_RAW, *v)
+
+ default:
+ const checkCodecSelfer1 = true // in case T is passed, where *T is a Selfer, still checkCodecSelfer
+ if !fastpathEncodeTypeSwitch(iv, e) {
+ e.encodeI(iv, false, checkCodecSelfer1)
+ }
+ }
+}
+
+func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, sptr uintptr, proceed bool) {
+ // use a goto statement instead of a recursive function for ptr/interface.
+TOP:
+ switch rv.Kind() {
+ case reflect.Ptr:
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ rv = rv.Elem()
+ if e.h.CheckCircularRef && rv.Kind() == reflect.Struct {
+ // TODO: Movable pointers will be an issue here. Future problem.
+ sptr = rv.UnsafeAddr()
+ break TOP
+ }
+ goto TOP
+ case reflect.Interface:
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ rv = rv.Elem()
+ goto TOP
+ case reflect.Slice, reflect.Map:
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ case reflect.Invalid, reflect.Func:
+ e.e.EncodeNil()
+ return
+ }
+
+ proceed = true
+ rv2 = rv
+ return
+}
+
+func (e *Encoder) doEncodeValue(rv reflect.Value, fn *encFn, sptr uintptr,
+ checkFastpath, checkCodecSelfer bool) {
+ if sptr != 0 {
+ if (&e.ci).add(sptr) {
+ e.errorf("circular reference found: # %d", sptr)
+ }
+ }
+ if fn == nil {
+ rt := rv.Type()
+ rtid := reflect.ValueOf(rt).Pointer()
+ // fn = e.getEncFn(rtid, rt, true, true)
+ fn = e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer)
+ }
+ fn.f(&fn.i, rv)
+ if sptr != 0 {
+ (&e.ci).remove(sptr)
+ }
+}
+
+func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) {
+ if rv, sptr, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed {
+ e.doEncodeValue(rv, nil, sptr, checkFastpath, checkCodecSelfer)
+ }
+}
+
+func (e *Encoder) encodeValue(rv reflect.Value, fn *encFn) {
+ // if a valid fn is passed, it MUST BE for the dereferenced type of rv
+ if rv, sptr, proceed := e.preEncodeValue(rv); proceed {
+ e.doEncodeValue(rv, fn, sptr, true, true)
+ }
+}
+
+func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *encFn) {
+ // rtid := reflect.ValueOf(rt).Pointer()
+ var ok bool
+ if useMapForCodecCache {
+ fn, ok = e.f[rtid]
+ } else {
+ for i := range e.s {
+ v := &(e.s[i])
+ if v.rtid == rtid {
+ fn, ok = &(v.fn), true
+ break
+ }
+ }
+ }
+ if ok {
+ return
+ }
+
+ if useMapForCodecCache {
+ if e.f == nil {
+ e.f = make(map[uintptr]*encFn, initCollectionCap)
+ }
+ fn = new(encFn)
+ e.f[rtid] = fn
+ } else {
+ if e.s == nil {
+ e.s = make([]encRtidFn, 0, initCollectionCap)
+ }
+ e.s = append(e.s, encRtidFn{rtid: rtid})
+ fn = &(e.s[len(e.s)-1]).fn
+ }
+
+ ti := e.h.getTypeInfo(rtid, rt)
+ fi := &(fn.i)
+ fi.e = e
+ fi.ti = ti
+
+ if checkCodecSelfer && ti.cs {
+ fn.f = (*encFnInfo).selferMarshal
+ } else if rtid == rawExtTypId {
+ fn.f = (*encFnInfo).rawExt
+ } else if e.e.IsBuiltinType(rtid) {
+ fn.f = (*encFnInfo).builtin
+ } else if xfFn := e.h.getExt(rtid); xfFn != nil {
+ fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
+ fn.f = (*encFnInfo).ext
+ } else if supportMarshalInterfaces && e.be && ti.bm {
+ fn.f = (*encFnInfo).binaryMarshal
+ } else if supportMarshalInterfaces && !e.be && e.js && ti.jm {
+ //If JSON, we should check JSONMarshal before textMarshal
+ fn.f = (*encFnInfo).jsonMarshal
+ } else if supportMarshalInterfaces && !e.be && ti.tm {
+ fn.f = (*encFnInfo).textMarshal
+ } else {
+ rk := rt.Kind()
+ if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
+ if rt.PkgPath() == "" { // un-named slice or map
+ if idx := fastpathAV.index(rtid); idx != -1 {
+ fn.f = fastpathAV[idx].encfn
+ }
+ } else {
+ ok = false
+ // use the mapping for the underlying type, if one exists
+ var rtu reflect.Type
+ if rk == reflect.Map {
+ rtu = reflect.MapOf(rt.Key(), rt.Elem())
+ } else {
+ rtu = reflect.SliceOf(rt.Elem())
+ }
+ rtuid := reflect.ValueOf(rtu).Pointer()
+ if idx := fastpathAV.index(rtuid); idx != -1 {
+ xfnf := fastpathAV[idx].encfn
+ xrt := fastpathAV[idx].rt
+ fn.f = func(xf *encFnInfo, xrv reflect.Value) {
+ xfnf(xf, xrv.Convert(xrt))
+ }
+ }
+ }
+ }
+ if fn.f == nil {
+ switch rk {
+ case reflect.Bool:
+ fn.f = (*encFnInfo).kBool
+ case reflect.String:
+ fn.f = (*encFnInfo).kString
+ case reflect.Float64:
+ fn.f = (*encFnInfo).kFloat64
+ case reflect.Float32:
+ fn.f = (*encFnInfo).kFloat32
+ case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16:
+ fn.f = (*encFnInfo).kInt
+ case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uintptr:
+ fn.f = (*encFnInfo).kUint
+ case reflect.Invalid:
+ fn.f = (*encFnInfo).kInvalid
+ case reflect.Chan:
+ fi.seq = seqTypeChan
+ fn.f = (*encFnInfo).kSlice
+ case reflect.Slice:
+ fi.seq = seqTypeSlice
+ fn.f = (*encFnInfo).kSlice
+ case reflect.Array:
+ fi.seq = seqTypeArray
+ fn.f = (*encFnInfo).kSlice
+ case reflect.Struct:
+ fn.f = (*encFnInfo).kStruct
+ // reflect.Ptr and reflect.Interface are handled already by preEncodeValue
+ // case reflect.Ptr:
+ // fn.f = (*encFnInfo).kPtr
+ // case reflect.Interface:
+ // fn.f = (*encFnInfo).kInterface
+ case reflect.Map:
+ fn.f = (*encFnInfo).kMap
+ default:
+ fn.f = (*encFnInfo).kErr
+ }
+ }
+ }
+
+ return
+}
+
+func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) {
+ if fnerr != nil {
+ panic(fnerr)
+ }
+ if bs == nil {
+ e.e.EncodeNil()
+ } else if asis {
+ e.asis(bs)
+ } else {
+ e.e.EncodeStringBytes(c, bs)
+ }
+}
+
+func (e *Encoder) asis(v []byte) {
+ if e.as == nil {
+ e.w.writeb(v)
+ } else {
+ e.as.EncodeAsis(v)
+ }
+}
+
+func (e *Encoder) errorf(format string, params ...interface{}) {
+ err := fmt.Errorf(format, params...)
+ panic(err)
+}
+
+// ----------------------------------------
+
+const encStructPoolLen = 5
+
+// encStructPool is an array of sync.Pool.
+// Each element of the array pools one of encStructPool(8|16|32|64|128).
+// It allows the re-use of slices up to 128 in length.
+// A performance cost of encoding structs was collecting
+// which values were empty and should be omitted.
+// We needed slices of reflect.Value and string to collect them.
+// This shared pool reduces the amount of unnecessary creation we do.
+// The cost is that of locking sometimes, but sync.Pool is efficient
+// enough to reduce thread contention.
+var encStructPool [encStructPoolLen]sync.Pool
+
+func init() {
+ encStructPool[0].New = func() interface{} { return new([8]stringRv) }
+ encStructPool[1].New = func() interface{} { return new([16]stringRv) }
+ encStructPool[2].New = func() interface{} { return new([32]stringRv) }
+ encStructPool[3].New = func() interface{} { return new([64]stringRv) }
+ encStructPool[4].New = func() interface{} { return new([128]stringRv) }
+}
+
+func encStructPoolGet(newlen int) (p *sync.Pool, v interface{}, s []stringRv) {
+ // if encStructPoolLen != 5 { // constant check, so removed at build time.
+ // panic(errors.New("encStructPoolLen must be equal to 5")) // defensive, in case it is changed
+ // }
+ // idxpool := newlen / 8
+ if newlen <= 8 {
+ p = &encStructPool[0]
+ v = p.Get()
+ s = v.(*[8]stringRv)[:newlen]
+ } else if newlen <= 16 {
+ p = &encStructPool[1]
+ v = p.Get()
+ s = v.(*[16]stringRv)[:newlen]
+ } else if newlen <= 32 {
+ p = &encStructPool[2]
+ v = p.Get()
+ s = v.(*[32]stringRv)[:newlen]
+ } else if newlen <= 64 {
+ p = &encStructPool[3]
+ v = p.Get()
+ s = v.(*[64]stringRv)[:newlen]
+ } else if newlen <= 128 {
+ p = &encStructPool[4]
+ v = p.Get()
+ s = v.(*[128]stringRv)[:newlen]
+ } else {
+ s = make([]stringRv, newlen)
+ }
+ return
+}
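+
+// Illustrative sketch (an editorial addition, not part of the generated code):
+// a caller would typically get a scratch slice sized for the struct's fields,
+// use it while encoding, and then return the pooled backing array when done.
+// The variable tisfi below is hypothetical.
+//
+//	pool, poolv, fkvs := encStructPoolGet(len(tisfi))
+//	// ... collect the non-empty fields into fkvs and encode them ...
+//	if pool != nil {
+//		pool.Put(poolv)
+//	}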
+
+// ----------------------------------------
+
+// func encErr(format string, params ...interface{}) {
+// doPanic(msgTagEnc, format, params...)
+// }
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.generated.go
new file mode 100644
index 0000000..cf6e00d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.generated.go
@@ -0,0 +1,39365 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl
+// ************************************************************
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file,
+// so as not to pollute encode.go and decode.go, or create a dependency there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+// - Many calls bypass reflection altogether
+//
+// Currently supported:
+// - slice of all builtin types,
+// - map of all builtin types to string or interface value,
+// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8).
+// This should provide adequate "typical" implementations.
+//
+// Note that fast track decode functions must handle values for which an address cannot be obtained.
+// For example:
+// m2 := map[string]int{}
+// p2 := []interface{}{m2}
+// // decoding into p2 will fail if fast track functions do not treat it as unaddressable.
+//
+
+import (
+ "reflect"
+ "sort"
+)
+
+const fastpathCheckNilFalse = false // for reflect
+const fastpathCheckNilTrue = true // for type switch
+
+type fastpathT struct{}
+
+var fastpathTV fastpathT
+
+type fastpathE struct {
+ rtid uintptr
+ rt reflect.Type
+ encfn func(*encFnInfo, reflect.Value)
+ decfn func(*decFnInfo, reflect.Value)
+}
+
+type fastpathA [271]fastpathE
+
+func (x *fastpathA) index(rtid uintptr) int {
+ // use binary search to grab the index (adapted from sort/search.go)
+ h, i, j := 0, 0, 271 // len(x)
+ for i < j {
+ h = i + (j-i)/2
+ if x[h].rtid < rtid {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ if i < 271 && x[i].rtid == rtid {
+ return i
+ }
+ return -1
+}
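+
+// Illustrative sketch (an editorial addition, not part of the generated code):
+// the table is keyed by the pointer of a reflect.Type value (its rtid), so a
+// lookup for a concrete type such as []int would go through index as follows.
+//
+//	rtid := reflect.ValueOf(reflect.TypeOf([]int(nil))).Pointer()
+//	if idx := fastpathAV.index(rtid); idx != -1 {
+//		encfn := fastpathAV[idx].encfn // fast-path encode function for []int
+//		_ = encfn
+//	}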
+
+type fastpathAslice []fastpathE
+
+func (x fastpathAslice) Len() int { return len(x) }
+func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid }
+func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+var fastpathAV fastpathA
+
+// due to a possible initialization loop error, the fastpath table is built in an init()
+func init() {
+ if !fastpathEnabled {
+ return
+ }
+ i := 0
+ fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) {
+ xrt := reflect.TypeOf(v)
+ xptr := reflect.ValueOf(xrt).Pointer()
+ fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
+ i++
+ return
+ }
+
+ fn([]interface{}(nil), (*encFnInfo).fastpathEncSliceIntfR, (*decFnInfo).fastpathDecSliceIntfR)
+ fn([]string(nil), (*encFnInfo).fastpathEncSliceStringR, (*decFnInfo).fastpathDecSliceStringR)
+ fn([]float32(nil), (*encFnInfo).fastpathEncSliceFloat32R, (*decFnInfo).fastpathDecSliceFloat32R)
+ fn([]float64(nil), (*encFnInfo).fastpathEncSliceFloat64R, (*decFnInfo).fastpathDecSliceFloat64R)
+ fn([]uint(nil), (*encFnInfo).fastpathEncSliceUintR, (*decFnInfo).fastpathDecSliceUintR)
+ fn([]uint16(nil), (*encFnInfo).fastpathEncSliceUint16R, (*decFnInfo).fastpathDecSliceUint16R)
+ fn([]uint32(nil), (*encFnInfo).fastpathEncSliceUint32R, (*decFnInfo).fastpathDecSliceUint32R)
+ fn([]uint64(nil), (*encFnInfo).fastpathEncSliceUint64R, (*decFnInfo).fastpathDecSliceUint64R)
+ fn([]uintptr(nil), (*encFnInfo).fastpathEncSliceUintptrR, (*decFnInfo).fastpathDecSliceUintptrR)
+ fn([]int(nil), (*encFnInfo).fastpathEncSliceIntR, (*decFnInfo).fastpathDecSliceIntR)
+ fn([]int8(nil), (*encFnInfo).fastpathEncSliceInt8R, (*decFnInfo).fastpathDecSliceInt8R)
+ fn([]int16(nil), (*encFnInfo).fastpathEncSliceInt16R, (*decFnInfo).fastpathDecSliceInt16R)
+ fn([]int32(nil), (*encFnInfo).fastpathEncSliceInt32R, (*decFnInfo).fastpathDecSliceInt32R)
+ fn([]int64(nil), (*encFnInfo).fastpathEncSliceInt64R, (*decFnInfo).fastpathDecSliceInt64R)
+ fn([]bool(nil), (*encFnInfo).fastpathEncSliceBoolR, (*decFnInfo).fastpathDecSliceBoolR)
+
+ fn(map[interface{}]interface{}(nil), (*encFnInfo).fastpathEncMapIntfIntfR, (*decFnInfo).fastpathDecMapIntfIntfR)
+ fn(map[interface{}]string(nil), (*encFnInfo).fastpathEncMapIntfStringR, (*decFnInfo).fastpathDecMapIntfStringR)
+ fn(map[interface{}]uint(nil), (*encFnInfo).fastpathEncMapIntfUintR, (*decFnInfo).fastpathDecMapIntfUintR)
+ fn(map[interface{}]uint8(nil), (*encFnInfo).fastpathEncMapIntfUint8R, (*decFnInfo).fastpathDecMapIntfUint8R)
+ fn(map[interface{}]uint16(nil), (*encFnInfo).fastpathEncMapIntfUint16R, (*decFnInfo).fastpathDecMapIntfUint16R)
+ fn(map[interface{}]uint32(nil), (*encFnInfo).fastpathEncMapIntfUint32R, (*decFnInfo).fastpathDecMapIntfUint32R)
+ fn(map[interface{}]uint64(nil), (*encFnInfo).fastpathEncMapIntfUint64R, (*decFnInfo).fastpathDecMapIntfUint64R)
+ fn(map[interface{}]uintptr(nil), (*encFnInfo).fastpathEncMapIntfUintptrR, (*decFnInfo).fastpathDecMapIntfUintptrR)
+ fn(map[interface{}]int(nil), (*encFnInfo).fastpathEncMapIntfIntR, (*decFnInfo).fastpathDecMapIntfIntR)
+ fn(map[interface{}]int8(nil), (*encFnInfo).fastpathEncMapIntfInt8R, (*decFnInfo).fastpathDecMapIntfInt8R)
+ fn(map[interface{}]int16(nil), (*encFnInfo).fastpathEncMapIntfInt16R, (*decFnInfo).fastpathDecMapIntfInt16R)
+ fn(map[interface{}]int32(nil), (*encFnInfo).fastpathEncMapIntfInt32R, (*decFnInfo).fastpathDecMapIntfInt32R)
+ fn(map[interface{}]int64(nil), (*encFnInfo).fastpathEncMapIntfInt64R, (*decFnInfo).fastpathDecMapIntfInt64R)
+ fn(map[interface{}]float32(nil), (*encFnInfo).fastpathEncMapIntfFloat32R, (*decFnInfo).fastpathDecMapIntfFloat32R)
+ fn(map[interface{}]float64(nil), (*encFnInfo).fastpathEncMapIntfFloat64R, (*decFnInfo).fastpathDecMapIntfFloat64R)
+ fn(map[interface{}]bool(nil), (*encFnInfo).fastpathEncMapIntfBoolR, (*decFnInfo).fastpathDecMapIntfBoolR)
+ fn(map[string]interface{}(nil), (*encFnInfo).fastpathEncMapStringIntfR, (*decFnInfo).fastpathDecMapStringIntfR)
+ fn(map[string]string(nil), (*encFnInfo).fastpathEncMapStringStringR, (*decFnInfo).fastpathDecMapStringStringR)
+ fn(map[string]uint(nil), (*encFnInfo).fastpathEncMapStringUintR, (*decFnInfo).fastpathDecMapStringUintR)
+ fn(map[string]uint8(nil), (*encFnInfo).fastpathEncMapStringUint8R, (*decFnInfo).fastpathDecMapStringUint8R)
+ fn(map[string]uint16(nil), (*encFnInfo).fastpathEncMapStringUint16R, (*decFnInfo).fastpathDecMapStringUint16R)
+ fn(map[string]uint32(nil), (*encFnInfo).fastpathEncMapStringUint32R, (*decFnInfo).fastpathDecMapStringUint32R)
+ fn(map[string]uint64(nil), (*encFnInfo).fastpathEncMapStringUint64R, (*decFnInfo).fastpathDecMapStringUint64R)
+ fn(map[string]uintptr(nil), (*encFnInfo).fastpathEncMapStringUintptrR, (*decFnInfo).fastpathDecMapStringUintptrR)
+ fn(map[string]int(nil), (*encFnInfo).fastpathEncMapStringIntR, (*decFnInfo).fastpathDecMapStringIntR)
+ fn(map[string]int8(nil), (*encFnInfo).fastpathEncMapStringInt8R, (*decFnInfo).fastpathDecMapStringInt8R)
+ fn(map[string]int16(nil), (*encFnInfo).fastpathEncMapStringInt16R, (*decFnInfo).fastpathDecMapStringInt16R)
+ fn(map[string]int32(nil), (*encFnInfo).fastpathEncMapStringInt32R, (*decFnInfo).fastpathDecMapStringInt32R)
+ fn(map[string]int64(nil), (*encFnInfo).fastpathEncMapStringInt64R, (*decFnInfo).fastpathDecMapStringInt64R)
+ fn(map[string]float32(nil), (*encFnInfo).fastpathEncMapStringFloat32R, (*decFnInfo).fastpathDecMapStringFloat32R)
+ fn(map[string]float64(nil), (*encFnInfo).fastpathEncMapStringFloat64R, (*decFnInfo).fastpathDecMapStringFloat64R)
+ fn(map[string]bool(nil), (*encFnInfo).fastpathEncMapStringBoolR, (*decFnInfo).fastpathDecMapStringBoolR)
+ fn(map[float32]interface{}(nil), (*encFnInfo).fastpathEncMapFloat32IntfR, (*decFnInfo).fastpathDecMapFloat32IntfR)
+ fn(map[float32]string(nil), (*encFnInfo).fastpathEncMapFloat32StringR, (*decFnInfo).fastpathDecMapFloat32StringR)
+ fn(map[float32]uint(nil), (*encFnInfo).fastpathEncMapFloat32UintR, (*decFnInfo).fastpathDecMapFloat32UintR)
+ fn(map[float32]uint8(nil), (*encFnInfo).fastpathEncMapFloat32Uint8R, (*decFnInfo).fastpathDecMapFloat32Uint8R)
+ fn(map[float32]uint16(nil), (*encFnInfo).fastpathEncMapFloat32Uint16R, (*decFnInfo).fastpathDecMapFloat32Uint16R)
+ fn(map[float32]uint32(nil), (*encFnInfo).fastpathEncMapFloat32Uint32R, (*decFnInfo).fastpathDecMapFloat32Uint32R)
+ fn(map[float32]uint64(nil), (*encFnInfo).fastpathEncMapFloat32Uint64R, (*decFnInfo).fastpathDecMapFloat32Uint64R)
+ fn(map[float32]uintptr(nil), (*encFnInfo).fastpathEncMapFloat32UintptrR, (*decFnInfo).fastpathDecMapFloat32UintptrR)
+ fn(map[float32]int(nil), (*encFnInfo).fastpathEncMapFloat32IntR, (*decFnInfo).fastpathDecMapFloat32IntR)
+ fn(map[float32]int8(nil), (*encFnInfo).fastpathEncMapFloat32Int8R, (*decFnInfo).fastpathDecMapFloat32Int8R)
+ fn(map[float32]int16(nil), (*encFnInfo).fastpathEncMapFloat32Int16R, (*decFnInfo).fastpathDecMapFloat32Int16R)
+ fn(map[float32]int32(nil), (*encFnInfo).fastpathEncMapFloat32Int32R, (*decFnInfo).fastpathDecMapFloat32Int32R)
+ fn(map[float32]int64(nil), (*encFnInfo).fastpathEncMapFloat32Int64R, (*decFnInfo).fastpathDecMapFloat32Int64R)
+ fn(map[float32]float32(nil), (*encFnInfo).fastpathEncMapFloat32Float32R, (*decFnInfo).fastpathDecMapFloat32Float32R)
+ fn(map[float32]float64(nil), (*encFnInfo).fastpathEncMapFloat32Float64R, (*decFnInfo).fastpathDecMapFloat32Float64R)
+ fn(map[float32]bool(nil), (*encFnInfo).fastpathEncMapFloat32BoolR, (*decFnInfo).fastpathDecMapFloat32BoolR)
+ fn(map[float64]interface{}(nil), (*encFnInfo).fastpathEncMapFloat64IntfR, (*decFnInfo).fastpathDecMapFloat64IntfR)
+ fn(map[float64]string(nil), (*encFnInfo).fastpathEncMapFloat64StringR, (*decFnInfo).fastpathDecMapFloat64StringR)
+ fn(map[float64]uint(nil), (*encFnInfo).fastpathEncMapFloat64UintR, (*decFnInfo).fastpathDecMapFloat64UintR)
+ fn(map[float64]uint8(nil), (*encFnInfo).fastpathEncMapFloat64Uint8R, (*decFnInfo).fastpathDecMapFloat64Uint8R)
+ fn(map[float64]uint16(nil), (*encFnInfo).fastpathEncMapFloat64Uint16R, (*decFnInfo).fastpathDecMapFloat64Uint16R)
+ fn(map[float64]uint32(nil), (*encFnInfo).fastpathEncMapFloat64Uint32R, (*decFnInfo).fastpathDecMapFloat64Uint32R)
+ fn(map[float64]uint64(nil), (*encFnInfo).fastpathEncMapFloat64Uint64R, (*decFnInfo).fastpathDecMapFloat64Uint64R)
+ fn(map[float64]uintptr(nil), (*encFnInfo).fastpathEncMapFloat64UintptrR, (*decFnInfo).fastpathDecMapFloat64UintptrR)
+ fn(map[float64]int(nil), (*encFnInfo).fastpathEncMapFloat64IntR, (*decFnInfo).fastpathDecMapFloat64IntR)
+ fn(map[float64]int8(nil), (*encFnInfo).fastpathEncMapFloat64Int8R, (*decFnInfo).fastpathDecMapFloat64Int8R)
+ fn(map[float64]int16(nil), (*encFnInfo).fastpathEncMapFloat64Int16R, (*decFnInfo).fastpathDecMapFloat64Int16R)
+ fn(map[float64]int32(nil), (*encFnInfo).fastpathEncMapFloat64Int32R, (*decFnInfo).fastpathDecMapFloat64Int32R)
+ fn(map[float64]int64(nil), (*encFnInfo).fastpathEncMapFloat64Int64R, (*decFnInfo).fastpathDecMapFloat64Int64R)
+ fn(map[float64]float32(nil), (*encFnInfo).fastpathEncMapFloat64Float32R, (*decFnInfo).fastpathDecMapFloat64Float32R)
+ fn(map[float64]float64(nil), (*encFnInfo).fastpathEncMapFloat64Float64R, (*decFnInfo).fastpathDecMapFloat64Float64R)
+ fn(map[float64]bool(nil), (*encFnInfo).fastpathEncMapFloat64BoolR, (*decFnInfo).fastpathDecMapFloat64BoolR)
+ fn(map[uint]interface{}(nil), (*encFnInfo).fastpathEncMapUintIntfR, (*decFnInfo).fastpathDecMapUintIntfR)
+ fn(map[uint]string(nil), (*encFnInfo).fastpathEncMapUintStringR, (*decFnInfo).fastpathDecMapUintStringR)
+ fn(map[uint]uint(nil), (*encFnInfo).fastpathEncMapUintUintR, (*decFnInfo).fastpathDecMapUintUintR)
+ fn(map[uint]uint8(nil), (*encFnInfo).fastpathEncMapUintUint8R, (*decFnInfo).fastpathDecMapUintUint8R)
+ fn(map[uint]uint16(nil), (*encFnInfo).fastpathEncMapUintUint16R, (*decFnInfo).fastpathDecMapUintUint16R)
+ fn(map[uint]uint32(nil), (*encFnInfo).fastpathEncMapUintUint32R, (*decFnInfo).fastpathDecMapUintUint32R)
+ fn(map[uint]uint64(nil), (*encFnInfo).fastpathEncMapUintUint64R, (*decFnInfo).fastpathDecMapUintUint64R)
+ fn(map[uint]uintptr(nil), (*encFnInfo).fastpathEncMapUintUintptrR, (*decFnInfo).fastpathDecMapUintUintptrR)
+ fn(map[uint]int(nil), (*encFnInfo).fastpathEncMapUintIntR, (*decFnInfo).fastpathDecMapUintIntR)
+ fn(map[uint]int8(nil), (*encFnInfo).fastpathEncMapUintInt8R, (*decFnInfo).fastpathDecMapUintInt8R)
+ fn(map[uint]int16(nil), (*encFnInfo).fastpathEncMapUintInt16R, (*decFnInfo).fastpathDecMapUintInt16R)
+ fn(map[uint]int32(nil), (*encFnInfo).fastpathEncMapUintInt32R, (*decFnInfo).fastpathDecMapUintInt32R)
+ fn(map[uint]int64(nil), (*encFnInfo).fastpathEncMapUintInt64R, (*decFnInfo).fastpathDecMapUintInt64R)
+ fn(map[uint]float32(nil), (*encFnInfo).fastpathEncMapUintFloat32R, (*decFnInfo).fastpathDecMapUintFloat32R)
+ fn(map[uint]float64(nil), (*encFnInfo).fastpathEncMapUintFloat64R, (*decFnInfo).fastpathDecMapUintFloat64R)
+ fn(map[uint]bool(nil), (*encFnInfo).fastpathEncMapUintBoolR, (*decFnInfo).fastpathDecMapUintBoolR)
+ fn(map[uint8]interface{}(nil), (*encFnInfo).fastpathEncMapUint8IntfR, (*decFnInfo).fastpathDecMapUint8IntfR)
+ fn(map[uint8]string(nil), (*encFnInfo).fastpathEncMapUint8StringR, (*decFnInfo).fastpathDecMapUint8StringR)
+ fn(map[uint8]uint(nil), (*encFnInfo).fastpathEncMapUint8UintR, (*decFnInfo).fastpathDecMapUint8UintR)
+ fn(map[uint8]uint8(nil), (*encFnInfo).fastpathEncMapUint8Uint8R, (*decFnInfo).fastpathDecMapUint8Uint8R)
+ fn(map[uint8]uint16(nil), (*encFnInfo).fastpathEncMapUint8Uint16R, (*decFnInfo).fastpathDecMapUint8Uint16R)
+ fn(map[uint8]uint32(nil), (*encFnInfo).fastpathEncMapUint8Uint32R, (*decFnInfo).fastpathDecMapUint8Uint32R)
+ fn(map[uint8]uint64(nil), (*encFnInfo).fastpathEncMapUint8Uint64R, (*decFnInfo).fastpathDecMapUint8Uint64R)
+ fn(map[uint8]uintptr(nil), (*encFnInfo).fastpathEncMapUint8UintptrR, (*decFnInfo).fastpathDecMapUint8UintptrR)
+ fn(map[uint8]int(nil), (*encFnInfo).fastpathEncMapUint8IntR, (*decFnInfo).fastpathDecMapUint8IntR)
+ fn(map[uint8]int8(nil), (*encFnInfo).fastpathEncMapUint8Int8R, (*decFnInfo).fastpathDecMapUint8Int8R)
+ fn(map[uint8]int16(nil), (*encFnInfo).fastpathEncMapUint8Int16R, (*decFnInfo).fastpathDecMapUint8Int16R)
+ fn(map[uint8]int32(nil), (*encFnInfo).fastpathEncMapUint8Int32R, (*decFnInfo).fastpathDecMapUint8Int32R)
+ fn(map[uint8]int64(nil), (*encFnInfo).fastpathEncMapUint8Int64R, (*decFnInfo).fastpathDecMapUint8Int64R)
+ fn(map[uint8]float32(nil), (*encFnInfo).fastpathEncMapUint8Float32R, (*decFnInfo).fastpathDecMapUint8Float32R)
+ fn(map[uint8]float64(nil), (*encFnInfo).fastpathEncMapUint8Float64R, (*decFnInfo).fastpathDecMapUint8Float64R)
+ fn(map[uint8]bool(nil), (*encFnInfo).fastpathEncMapUint8BoolR, (*decFnInfo).fastpathDecMapUint8BoolR)
+ fn(map[uint16]interface{}(nil), (*encFnInfo).fastpathEncMapUint16IntfR, (*decFnInfo).fastpathDecMapUint16IntfR)
+ fn(map[uint16]string(nil), (*encFnInfo).fastpathEncMapUint16StringR, (*decFnInfo).fastpathDecMapUint16StringR)
+ fn(map[uint16]uint(nil), (*encFnInfo).fastpathEncMapUint16UintR, (*decFnInfo).fastpathDecMapUint16UintR)
+ fn(map[uint16]uint8(nil), (*encFnInfo).fastpathEncMapUint16Uint8R, (*decFnInfo).fastpathDecMapUint16Uint8R)
+ fn(map[uint16]uint16(nil), (*encFnInfo).fastpathEncMapUint16Uint16R, (*decFnInfo).fastpathDecMapUint16Uint16R)
+ fn(map[uint16]uint32(nil), (*encFnInfo).fastpathEncMapUint16Uint32R, (*decFnInfo).fastpathDecMapUint16Uint32R)
+ fn(map[uint16]uint64(nil), (*encFnInfo).fastpathEncMapUint16Uint64R, (*decFnInfo).fastpathDecMapUint16Uint64R)
+ fn(map[uint16]uintptr(nil), (*encFnInfo).fastpathEncMapUint16UintptrR, (*decFnInfo).fastpathDecMapUint16UintptrR)
+ fn(map[uint16]int(nil), (*encFnInfo).fastpathEncMapUint16IntR, (*decFnInfo).fastpathDecMapUint16IntR)
+ fn(map[uint16]int8(nil), (*encFnInfo).fastpathEncMapUint16Int8R, (*decFnInfo).fastpathDecMapUint16Int8R)
+ fn(map[uint16]int16(nil), (*encFnInfo).fastpathEncMapUint16Int16R, (*decFnInfo).fastpathDecMapUint16Int16R)
+ fn(map[uint16]int32(nil), (*encFnInfo).fastpathEncMapUint16Int32R, (*decFnInfo).fastpathDecMapUint16Int32R)
+ fn(map[uint16]int64(nil), (*encFnInfo).fastpathEncMapUint16Int64R, (*decFnInfo).fastpathDecMapUint16Int64R)
+ fn(map[uint16]float32(nil), (*encFnInfo).fastpathEncMapUint16Float32R, (*decFnInfo).fastpathDecMapUint16Float32R)
+ fn(map[uint16]float64(nil), (*encFnInfo).fastpathEncMapUint16Float64R, (*decFnInfo).fastpathDecMapUint16Float64R)
+ fn(map[uint16]bool(nil), (*encFnInfo).fastpathEncMapUint16BoolR, (*decFnInfo).fastpathDecMapUint16BoolR)
+ fn(map[uint32]interface{}(nil), (*encFnInfo).fastpathEncMapUint32IntfR, (*decFnInfo).fastpathDecMapUint32IntfR)
+ fn(map[uint32]string(nil), (*encFnInfo).fastpathEncMapUint32StringR, (*decFnInfo).fastpathDecMapUint32StringR)
+ fn(map[uint32]uint(nil), (*encFnInfo).fastpathEncMapUint32UintR, (*decFnInfo).fastpathDecMapUint32UintR)
+ fn(map[uint32]uint8(nil), (*encFnInfo).fastpathEncMapUint32Uint8R, (*decFnInfo).fastpathDecMapUint32Uint8R)
+ fn(map[uint32]uint16(nil), (*encFnInfo).fastpathEncMapUint32Uint16R, (*decFnInfo).fastpathDecMapUint32Uint16R)
+ fn(map[uint32]uint32(nil), (*encFnInfo).fastpathEncMapUint32Uint32R, (*decFnInfo).fastpathDecMapUint32Uint32R)
+ fn(map[uint32]uint64(nil), (*encFnInfo).fastpathEncMapUint32Uint64R, (*decFnInfo).fastpathDecMapUint32Uint64R)
+ fn(map[uint32]uintptr(nil), (*encFnInfo).fastpathEncMapUint32UintptrR, (*decFnInfo).fastpathDecMapUint32UintptrR)
+ fn(map[uint32]int(nil), (*encFnInfo).fastpathEncMapUint32IntR, (*decFnInfo).fastpathDecMapUint32IntR)
+ fn(map[uint32]int8(nil), (*encFnInfo).fastpathEncMapUint32Int8R, (*decFnInfo).fastpathDecMapUint32Int8R)
+ fn(map[uint32]int16(nil), (*encFnInfo).fastpathEncMapUint32Int16R, (*decFnInfo).fastpathDecMapUint32Int16R)
+ fn(map[uint32]int32(nil), (*encFnInfo).fastpathEncMapUint32Int32R, (*decFnInfo).fastpathDecMapUint32Int32R)
+ fn(map[uint32]int64(nil), (*encFnInfo).fastpathEncMapUint32Int64R, (*decFnInfo).fastpathDecMapUint32Int64R)
+ fn(map[uint32]float32(nil), (*encFnInfo).fastpathEncMapUint32Float32R, (*decFnInfo).fastpathDecMapUint32Float32R)
+ fn(map[uint32]float64(nil), (*encFnInfo).fastpathEncMapUint32Float64R, (*decFnInfo).fastpathDecMapUint32Float64R)
+ fn(map[uint32]bool(nil), (*encFnInfo).fastpathEncMapUint32BoolR, (*decFnInfo).fastpathDecMapUint32BoolR)
+ fn(map[uint64]interface{}(nil), (*encFnInfo).fastpathEncMapUint64IntfR, (*decFnInfo).fastpathDecMapUint64IntfR)
+ fn(map[uint64]string(nil), (*encFnInfo).fastpathEncMapUint64StringR, (*decFnInfo).fastpathDecMapUint64StringR)
+ fn(map[uint64]uint(nil), (*encFnInfo).fastpathEncMapUint64UintR, (*decFnInfo).fastpathDecMapUint64UintR)
+ fn(map[uint64]uint8(nil), (*encFnInfo).fastpathEncMapUint64Uint8R, (*decFnInfo).fastpathDecMapUint64Uint8R)
+ fn(map[uint64]uint16(nil), (*encFnInfo).fastpathEncMapUint64Uint16R, (*decFnInfo).fastpathDecMapUint64Uint16R)
+ fn(map[uint64]uint32(nil), (*encFnInfo).fastpathEncMapUint64Uint32R, (*decFnInfo).fastpathDecMapUint64Uint32R)
+ fn(map[uint64]uint64(nil), (*encFnInfo).fastpathEncMapUint64Uint64R, (*decFnInfo).fastpathDecMapUint64Uint64R)
+ fn(map[uint64]uintptr(nil), (*encFnInfo).fastpathEncMapUint64UintptrR, (*decFnInfo).fastpathDecMapUint64UintptrR)
+ fn(map[uint64]int(nil), (*encFnInfo).fastpathEncMapUint64IntR, (*decFnInfo).fastpathDecMapUint64IntR)
+ fn(map[uint64]int8(nil), (*encFnInfo).fastpathEncMapUint64Int8R, (*decFnInfo).fastpathDecMapUint64Int8R)
+ fn(map[uint64]int16(nil), (*encFnInfo).fastpathEncMapUint64Int16R, (*decFnInfo).fastpathDecMapUint64Int16R)
+ fn(map[uint64]int32(nil), (*encFnInfo).fastpathEncMapUint64Int32R, (*decFnInfo).fastpathDecMapUint64Int32R)
+ fn(map[uint64]int64(nil), (*encFnInfo).fastpathEncMapUint64Int64R, (*decFnInfo).fastpathDecMapUint64Int64R)
+ fn(map[uint64]float32(nil), (*encFnInfo).fastpathEncMapUint64Float32R, (*decFnInfo).fastpathDecMapUint64Float32R)
+ fn(map[uint64]float64(nil), (*encFnInfo).fastpathEncMapUint64Float64R, (*decFnInfo).fastpathDecMapUint64Float64R)
+ fn(map[uint64]bool(nil), (*encFnInfo).fastpathEncMapUint64BoolR, (*decFnInfo).fastpathDecMapUint64BoolR)
+ fn(map[uintptr]interface{}(nil), (*encFnInfo).fastpathEncMapUintptrIntfR, (*decFnInfo).fastpathDecMapUintptrIntfR)
+ fn(map[uintptr]string(nil), (*encFnInfo).fastpathEncMapUintptrStringR, (*decFnInfo).fastpathDecMapUintptrStringR)
+ fn(map[uintptr]uint(nil), (*encFnInfo).fastpathEncMapUintptrUintR, (*decFnInfo).fastpathDecMapUintptrUintR)
+ fn(map[uintptr]uint8(nil), (*encFnInfo).fastpathEncMapUintptrUint8R, (*decFnInfo).fastpathDecMapUintptrUint8R)
+ fn(map[uintptr]uint16(nil), (*encFnInfo).fastpathEncMapUintptrUint16R, (*decFnInfo).fastpathDecMapUintptrUint16R)
+ fn(map[uintptr]uint32(nil), (*encFnInfo).fastpathEncMapUintptrUint32R, (*decFnInfo).fastpathDecMapUintptrUint32R)
+ fn(map[uintptr]uint64(nil), (*encFnInfo).fastpathEncMapUintptrUint64R, (*decFnInfo).fastpathDecMapUintptrUint64R)
+ fn(map[uintptr]uintptr(nil), (*encFnInfo).fastpathEncMapUintptrUintptrR, (*decFnInfo).fastpathDecMapUintptrUintptrR)
+ fn(map[uintptr]int(nil), (*encFnInfo).fastpathEncMapUintptrIntR, (*decFnInfo).fastpathDecMapUintptrIntR)
+ fn(map[uintptr]int8(nil), (*encFnInfo).fastpathEncMapUintptrInt8R, (*decFnInfo).fastpathDecMapUintptrInt8R)
+ fn(map[uintptr]int16(nil), (*encFnInfo).fastpathEncMapUintptrInt16R, (*decFnInfo).fastpathDecMapUintptrInt16R)
+ fn(map[uintptr]int32(nil), (*encFnInfo).fastpathEncMapUintptrInt32R, (*decFnInfo).fastpathDecMapUintptrInt32R)
+ fn(map[uintptr]int64(nil), (*encFnInfo).fastpathEncMapUintptrInt64R, (*decFnInfo).fastpathDecMapUintptrInt64R)
+ fn(map[uintptr]float32(nil), (*encFnInfo).fastpathEncMapUintptrFloat32R, (*decFnInfo).fastpathDecMapUintptrFloat32R)
+ fn(map[uintptr]float64(nil), (*encFnInfo).fastpathEncMapUintptrFloat64R, (*decFnInfo).fastpathDecMapUintptrFloat64R)
+ fn(map[uintptr]bool(nil), (*encFnInfo).fastpathEncMapUintptrBoolR, (*decFnInfo).fastpathDecMapUintptrBoolR)
+ fn(map[int]interface{}(nil), (*encFnInfo).fastpathEncMapIntIntfR, (*decFnInfo).fastpathDecMapIntIntfR)
+ fn(map[int]string(nil), (*encFnInfo).fastpathEncMapIntStringR, (*decFnInfo).fastpathDecMapIntStringR)
+ fn(map[int]uint(nil), (*encFnInfo).fastpathEncMapIntUintR, (*decFnInfo).fastpathDecMapIntUintR)
+ fn(map[int]uint8(nil), (*encFnInfo).fastpathEncMapIntUint8R, (*decFnInfo).fastpathDecMapIntUint8R)
+ fn(map[int]uint16(nil), (*encFnInfo).fastpathEncMapIntUint16R, (*decFnInfo).fastpathDecMapIntUint16R)
+ fn(map[int]uint32(nil), (*encFnInfo).fastpathEncMapIntUint32R, (*decFnInfo).fastpathDecMapIntUint32R)
+ fn(map[int]uint64(nil), (*encFnInfo).fastpathEncMapIntUint64R, (*decFnInfo).fastpathDecMapIntUint64R)
+ fn(map[int]uintptr(nil), (*encFnInfo).fastpathEncMapIntUintptrR, (*decFnInfo).fastpathDecMapIntUintptrR)
+ fn(map[int]int(nil), (*encFnInfo).fastpathEncMapIntIntR, (*decFnInfo).fastpathDecMapIntIntR)
+ fn(map[int]int8(nil), (*encFnInfo).fastpathEncMapIntInt8R, (*decFnInfo).fastpathDecMapIntInt8R)
+ fn(map[int]int16(nil), (*encFnInfo).fastpathEncMapIntInt16R, (*decFnInfo).fastpathDecMapIntInt16R)
+ fn(map[int]int32(nil), (*encFnInfo).fastpathEncMapIntInt32R, (*decFnInfo).fastpathDecMapIntInt32R)
+ fn(map[int]int64(nil), (*encFnInfo).fastpathEncMapIntInt64R, (*decFnInfo).fastpathDecMapIntInt64R)
+ fn(map[int]float32(nil), (*encFnInfo).fastpathEncMapIntFloat32R, (*decFnInfo).fastpathDecMapIntFloat32R)
+ fn(map[int]float64(nil), (*encFnInfo).fastpathEncMapIntFloat64R, (*decFnInfo).fastpathDecMapIntFloat64R)
+ fn(map[int]bool(nil), (*encFnInfo).fastpathEncMapIntBoolR, (*decFnInfo).fastpathDecMapIntBoolR)
+ fn(map[int8]interface{}(nil), (*encFnInfo).fastpathEncMapInt8IntfR, (*decFnInfo).fastpathDecMapInt8IntfR)
+ fn(map[int8]string(nil), (*encFnInfo).fastpathEncMapInt8StringR, (*decFnInfo).fastpathDecMapInt8StringR)
+ fn(map[int8]uint(nil), (*encFnInfo).fastpathEncMapInt8UintR, (*decFnInfo).fastpathDecMapInt8UintR)
+ fn(map[int8]uint8(nil), (*encFnInfo).fastpathEncMapInt8Uint8R, (*decFnInfo).fastpathDecMapInt8Uint8R)
+ fn(map[int8]uint16(nil), (*encFnInfo).fastpathEncMapInt8Uint16R, (*decFnInfo).fastpathDecMapInt8Uint16R)
+ fn(map[int8]uint32(nil), (*encFnInfo).fastpathEncMapInt8Uint32R, (*decFnInfo).fastpathDecMapInt8Uint32R)
+ fn(map[int8]uint64(nil), (*encFnInfo).fastpathEncMapInt8Uint64R, (*decFnInfo).fastpathDecMapInt8Uint64R)
+ fn(map[int8]uintptr(nil), (*encFnInfo).fastpathEncMapInt8UintptrR, (*decFnInfo).fastpathDecMapInt8UintptrR)
+ fn(map[int8]int(nil), (*encFnInfo).fastpathEncMapInt8IntR, (*decFnInfo).fastpathDecMapInt8IntR)
+ fn(map[int8]int8(nil), (*encFnInfo).fastpathEncMapInt8Int8R, (*decFnInfo).fastpathDecMapInt8Int8R)
+ fn(map[int8]int16(nil), (*encFnInfo).fastpathEncMapInt8Int16R, (*decFnInfo).fastpathDecMapInt8Int16R)
+ fn(map[int8]int32(nil), (*encFnInfo).fastpathEncMapInt8Int32R, (*decFnInfo).fastpathDecMapInt8Int32R)
+ fn(map[int8]int64(nil), (*encFnInfo).fastpathEncMapInt8Int64R, (*decFnInfo).fastpathDecMapInt8Int64R)
+ fn(map[int8]float32(nil), (*encFnInfo).fastpathEncMapInt8Float32R, (*decFnInfo).fastpathDecMapInt8Float32R)
+ fn(map[int8]float64(nil), (*encFnInfo).fastpathEncMapInt8Float64R, (*decFnInfo).fastpathDecMapInt8Float64R)
+ fn(map[int8]bool(nil), (*encFnInfo).fastpathEncMapInt8BoolR, (*decFnInfo).fastpathDecMapInt8BoolR)
+ fn(map[int16]interface{}(nil), (*encFnInfo).fastpathEncMapInt16IntfR, (*decFnInfo).fastpathDecMapInt16IntfR)
+ fn(map[int16]string(nil), (*encFnInfo).fastpathEncMapInt16StringR, (*decFnInfo).fastpathDecMapInt16StringR)
+ fn(map[int16]uint(nil), (*encFnInfo).fastpathEncMapInt16UintR, (*decFnInfo).fastpathDecMapInt16UintR)
+ fn(map[int16]uint8(nil), (*encFnInfo).fastpathEncMapInt16Uint8R, (*decFnInfo).fastpathDecMapInt16Uint8R)
+ fn(map[int16]uint16(nil), (*encFnInfo).fastpathEncMapInt16Uint16R, (*decFnInfo).fastpathDecMapInt16Uint16R)
+ fn(map[int16]uint32(nil), (*encFnInfo).fastpathEncMapInt16Uint32R, (*decFnInfo).fastpathDecMapInt16Uint32R)
+ fn(map[int16]uint64(nil), (*encFnInfo).fastpathEncMapInt16Uint64R, (*decFnInfo).fastpathDecMapInt16Uint64R)
+ fn(map[int16]uintptr(nil), (*encFnInfo).fastpathEncMapInt16UintptrR, (*decFnInfo).fastpathDecMapInt16UintptrR)
+ fn(map[int16]int(nil), (*encFnInfo).fastpathEncMapInt16IntR, (*decFnInfo).fastpathDecMapInt16IntR)
+ fn(map[int16]int8(nil), (*encFnInfo).fastpathEncMapInt16Int8R, (*decFnInfo).fastpathDecMapInt16Int8R)
+ fn(map[int16]int16(nil), (*encFnInfo).fastpathEncMapInt16Int16R, (*decFnInfo).fastpathDecMapInt16Int16R)
+ fn(map[int16]int32(nil), (*encFnInfo).fastpathEncMapInt16Int32R, (*decFnInfo).fastpathDecMapInt16Int32R)
+ fn(map[int16]int64(nil), (*encFnInfo).fastpathEncMapInt16Int64R, (*decFnInfo).fastpathDecMapInt16Int64R)
+ fn(map[int16]float32(nil), (*encFnInfo).fastpathEncMapInt16Float32R, (*decFnInfo).fastpathDecMapInt16Float32R)
+ fn(map[int16]float64(nil), (*encFnInfo).fastpathEncMapInt16Float64R, (*decFnInfo).fastpathDecMapInt16Float64R)
+ fn(map[int16]bool(nil), (*encFnInfo).fastpathEncMapInt16BoolR, (*decFnInfo).fastpathDecMapInt16BoolR)
+ fn(map[int32]interface{}(nil), (*encFnInfo).fastpathEncMapInt32IntfR, (*decFnInfo).fastpathDecMapInt32IntfR)
+ fn(map[int32]string(nil), (*encFnInfo).fastpathEncMapInt32StringR, (*decFnInfo).fastpathDecMapInt32StringR)
+ fn(map[int32]uint(nil), (*encFnInfo).fastpathEncMapInt32UintR, (*decFnInfo).fastpathDecMapInt32UintR)
+ fn(map[int32]uint8(nil), (*encFnInfo).fastpathEncMapInt32Uint8R, (*decFnInfo).fastpathDecMapInt32Uint8R)
+ fn(map[int32]uint16(nil), (*encFnInfo).fastpathEncMapInt32Uint16R, (*decFnInfo).fastpathDecMapInt32Uint16R)
+ fn(map[int32]uint32(nil), (*encFnInfo).fastpathEncMapInt32Uint32R, (*decFnInfo).fastpathDecMapInt32Uint32R)
+ fn(map[int32]uint64(nil), (*encFnInfo).fastpathEncMapInt32Uint64R, (*decFnInfo).fastpathDecMapInt32Uint64R)
+ fn(map[int32]uintptr(nil), (*encFnInfo).fastpathEncMapInt32UintptrR, (*decFnInfo).fastpathDecMapInt32UintptrR)
+ fn(map[int32]int(nil), (*encFnInfo).fastpathEncMapInt32IntR, (*decFnInfo).fastpathDecMapInt32IntR)
+ fn(map[int32]int8(nil), (*encFnInfo).fastpathEncMapInt32Int8R, (*decFnInfo).fastpathDecMapInt32Int8R)
+ fn(map[int32]int16(nil), (*encFnInfo).fastpathEncMapInt32Int16R, (*decFnInfo).fastpathDecMapInt32Int16R)
+ fn(map[int32]int32(nil), (*encFnInfo).fastpathEncMapInt32Int32R, (*decFnInfo).fastpathDecMapInt32Int32R)
+ fn(map[int32]int64(nil), (*encFnInfo).fastpathEncMapInt32Int64R, (*decFnInfo).fastpathDecMapInt32Int64R)
+ fn(map[int32]float32(nil), (*encFnInfo).fastpathEncMapInt32Float32R, (*decFnInfo).fastpathDecMapInt32Float32R)
+ fn(map[int32]float64(nil), (*encFnInfo).fastpathEncMapInt32Float64R, (*decFnInfo).fastpathDecMapInt32Float64R)
+ fn(map[int32]bool(nil), (*encFnInfo).fastpathEncMapInt32BoolR, (*decFnInfo).fastpathDecMapInt32BoolR)
+ fn(map[int64]interface{}(nil), (*encFnInfo).fastpathEncMapInt64IntfR, (*decFnInfo).fastpathDecMapInt64IntfR)
+ fn(map[int64]string(nil), (*encFnInfo).fastpathEncMapInt64StringR, (*decFnInfo).fastpathDecMapInt64StringR)
+ fn(map[int64]uint(nil), (*encFnInfo).fastpathEncMapInt64UintR, (*decFnInfo).fastpathDecMapInt64UintR)
+ fn(map[int64]uint8(nil), (*encFnInfo).fastpathEncMapInt64Uint8R, (*decFnInfo).fastpathDecMapInt64Uint8R)
+ fn(map[int64]uint16(nil), (*encFnInfo).fastpathEncMapInt64Uint16R, (*decFnInfo).fastpathDecMapInt64Uint16R)
+ fn(map[int64]uint32(nil), (*encFnInfo).fastpathEncMapInt64Uint32R, (*decFnInfo).fastpathDecMapInt64Uint32R)
+ fn(map[int64]uint64(nil), (*encFnInfo).fastpathEncMapInt64Uint64R, (*decFnInfo).fastpathDecMapInt64Uint64R)
+ fn(map[int64]uintptr(nil), (*encFnInfo).fastpathEncMapInt64UintptrR, (*decFnInfo).fastpathDecMapInt64UintptrR)
+ fn(map[int64]int(nil), (*encFnInfo).fastpathEncMapInt64IntR, (*decFnInfo).fastpathDecMapInt64IntR)
+ fn(map[int64]int8(nil), (*encFnInfo).fastpathEncMapInt64Int8R, (*decFnInfo).fastpathDecMapInt64Int8R)
+ fn(map[int64]int16(nil), (*encFnInfo).fastpathEncMapInt64Int16R, (*decFnInfo).fastpathDecMapInt64Int16R)
+ fn(map[int64]int32(nil), (*encFnInfo).fastpathEncMapInt64Int32R, (*decFnInfo).fastpathDecMapInt64Int32R)
+ fn(map[int64]int64(nil), (*encFnInfo).fastpathEncMapInt64Int64R, (*decFnInfo).fastpathDecMapInt64Int64R)
+ fn(map[int64]float32(nil), (*encFnInfo).fastpathEncMapInt64Float32R, (*decFnInfo).fastpathDecMapInt64Float32R)
+ fn(map[int64]float64(nil), (*encFnInfo).fastpathEncMapInt64Float64R, (*decFnInfo).fastpathDecMapInt64Float64R)
+ fn(map[int64]bool(nil), (*encFnInfo).fastpathEncMapInt64BoolR, (*decFnInfo).fastpathDecMapInt64BoolR)
+ fn(map[bool]interface{}(nil), (*encFnInfo).fastpathEncMapBoolIntfR, (*decFnInfo).fastpathDecMapBoolIntfR)
+ fn(map[bool]string(nil), (*encFnInfo).fastpathEncMapBoolStringR, (*decFnInfo).fastpathDecMapBoolStringR)
+ fn(map[bool]uint(nil), (*encFnInfo).fastpathEncMapBoolUintR, (*decFnInfo).fastpathDecMapBoolUintR)
+ fn(map[bool]uint8(nil), (*encFnInfo).fastpathEncMapBoolUint8R, (*decFnInfo).fastpathDecMapBoolUint8R)
+ fn(map[bool]uint16(nil), (*encFnInfo).fastpathEncMapBoolUint16R, (*decFnInfo).fastpathDecMapBoolUint16R)
+ fn(map[bool]uint32(nil), (*encFnInfo).fastpathEncMapBoolUint32R, (*decFnInfo).fastpathDecMapBoolUint32R)
+ fn(map[bool]uint64(nil), (*encFnInfo).fastpathEncMapBoolUint64R, (*decFnInfo).fastpathDecMapBoolUint64R)
+ fn(map[bool]uintptr(nil), (*encFnInfo).fastpathEncMapBoolUintptrR, (*decFnInfo).fastpathDecMapBoolUintptrR)
+ fn(map[bool]int(nil), (*encFnInfo).fastpathEncMapBoolIntR, (*decFnInfo).fastpathDecMapBoolIntR)
+ fn(map[bool]int8(nil), (*encFnInfo).fastpathEncMapBoolInt8R, (*decFnInfo).fastpathDecMapBoolInt8R)
+ fn(map[bool]int16(nil), (*encFnInfo).fastpathEncMapBoolInt16R, (*decFnInfo).fastpathDecMapBoolInt16R)
+ fn(map[bool]int32(nil), (*encFnInfo).fastpathEncMapBoolInt32R, (*decFnInfo).fastpathDecMapBoolInt32R)
+ fn(map[bool]int64(nil), (*encFnInfo).fastpathEncMapBoolInt64R, (*decFnInfo).fastpathDecMapBoolInt64R)
+ fn(map[bool]float32(nil), (*encFnInfo).fastpathEncMapBoolFloat32R, (*decFnInfo).fastpathDecMapBoolFloat32R)
+ fn(map[bool]float64(nil), (*encFnInfo).fastpathEncMapBoolFloat64R, (*decFnInfo).fastpathDecMapBoolFloat64R)
+ fn(map[bool]bool(nil), (*encFnInfo).fastpathEncMapBoolBoolR, (*decFnInfo).fastpathDecMapBoolBoolR)
+
+ sort.Sort(fastpathAslice(fastpathAV[:]))
+}
+
+// -- encode
+
+// -- -- fast path type switch
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+
+ case []interface{}:
+ fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e)
+ case *[]interface{}:
+ fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]interface{}:
+ fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]interface{}:
+ fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]string:
+ fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]string:
+ fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint:
+ fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint:
+ fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint8:
+ fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint8:
+ fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint16:
+ fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint16:
+ fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint32:
+ fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint32:
+ fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint64:
+ fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint64:
+ fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uintptr:
+ fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uintptr:
+ fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int:
+ fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int:
+ fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int8:
+ fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int8:
+ fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int16:
+ fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int16:
+ fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int32:
+ fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int32:
+ fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int64:
+ fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int64:
+ fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]float32:
+ fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]float32:
+ fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]float64:
+ fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]float64:
+ fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]bool:
+ fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]bool:
+ fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e)
+
+ case []string:
+ fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e)
+ case *[]string:
+ fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e)
+ case *map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]string:
+ fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e)
+ case *map[string]string:
+ fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint:
+ fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e)
+ case *map[string]uint:
+ fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint8:
+ fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint8:
+ fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint16:
+ fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint16:
+ fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint32:
+ fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint32:
+ fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint64:
+ fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint64:
+ fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uintptr:
+ fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[string]uintptr:
+ fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int:
+ fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e)
+ case *map[string]int:
+ fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int8:
+ fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e)
+ case *map[string]int8:
+ fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int16:
+ fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e)
+ case *map[string]int16:
+ fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int32:
+ fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e)
+ case *map[string]int32:
+ fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int64:
+ fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e)
+ case *map[string]int64:
+ fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]float32:
+ fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[string]float32:
+ fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]float64:
+ fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[string]float64:
+ fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]bool:
+ fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e)
+ case *map[string]bool:
+ fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e)
+
+ case []float32:
+ fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e)
+ case *[]float32:
+ fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]interface{}:
+ fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[float32]interface{}:
+ fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]string:
+ fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e)
+ case *map[float32]string:
+ fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint:
+ fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint:
+ fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint8:
+ fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint8:
+ fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint16:
+ fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint16:
+ fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint32:
+ fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint32:
+ fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint64:
+ fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint64:
+ fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uintptr:
+ fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[float32]uintptr:
+ fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int:
+ fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e)
+ case *map[float32]int:
+ fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int8:
+ fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int8:
+ fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int16:
+ fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int16:
+ fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int32:
+ fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int32:
+ fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int64:
+ fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int64:
+ fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]float32:
+ fastpathTV.EncMapFloat32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]float32:
+ fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]float64:
+ fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]float64:
+ fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]bool:
+ fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[float32]bool:
+ fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []float64:
+ fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e)
+ case *[]float64:
+ fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]interface{}:
+ fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[float64]interface{}:
+ fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]string:
+ fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e)
+ case *map[float64]string:
+ fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint:
+ fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint:
+ fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint8:
+ fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint8:
+ fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint16:
+ fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint16:
+ fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint32:
+ fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint32:
+ fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint64:
+ fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint64:
+ fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uintptr:
+ fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[float64]uintptr:
+ fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int:
+ fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e)
+ case *map[float64]int:
+ fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int8:
+ fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int8:
+ fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int16:
+ fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int16:
+ fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int32:
+ fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int32:
+ fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int64:
+ fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int64:
+ fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]float32:
+ fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]float32:
+ fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]float64:
+ fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]float64:
+ fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]bool:
+ fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[float64]bool:
+ fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uint:
+ fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e)
+ case *[]uint:
+ fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]interface{}:
+ fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint]interface{}:
+ fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]string:
+ fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e)
+ case *map[uint]string:
+ fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint:
+ fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint:
+ fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint8:
+ fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint8:
+ fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint16:
+ fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint16:
+ fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint32:
+ fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint32:
+ fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint64:
+ fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint64:
+ fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uintptr:
+ fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint]uintptr:
+ fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int:
+ fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e)
+ case *map[uint]int:
+ fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int8:
+ fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int8:
+ fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int16:
+ fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int16:
+ fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int32:
+ fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int32:
+ fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int64:
+ fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int64:
+ fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]float32:
+ fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]float32:
+ fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]float64:
+ fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]float64:
+ fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]bool:
+ fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint]bool:
+ fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]string:
+ fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]string:
+ fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint:
+ fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint:
+ fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint16:
+ fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint16:
+ fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint32:
+ fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint32:
+ fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uintptr:
+ fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uintptr:
+ fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int:
+ fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int:
+ fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int8:
+ fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int8:
+ fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int16:
+ fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int16:
+ fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int64:
+ fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int64:
+ fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]float32:
+ fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]float32:
+ fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uint16:
+ fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e)
+ case *[]uint16:
+ fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]interface{}:
+ fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]interface{}:
+ fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]string:
+ fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]string:
+ fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint:
+ fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint:
+ fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint8:
+ fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint8:
+ fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint16:
+ fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint16:
+ fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint32:
+ fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint32:
+ fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint64:
+ fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint64:
+ fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uintptr:
+ fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uintptr:
+ fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int:
+ fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int:
+ fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int8:
+ fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int8:
+ fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int16:
+ fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int16:
+ fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int32:
+ fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int32:
+ fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int64:
+ fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int64:
+ fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]float32:
+ fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]float32:
+ fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]float64:
+ fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]float64:
+ fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]bool:
+ fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]bool:
+ fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uint32:
+ fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e)
+ case *[]uint32:
+ fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]interface{}:
+ fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]interface{}:
+ fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]string:
+ fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]string:
+ fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint:
+ fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint:
+ fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint8:
+ fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint8:
+ fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint16:
+ fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint16:
+ fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint32:
+ fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint32:
+ fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint64:
+ fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint64:
+ fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uintptr:
+ fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uintptr:
+ fastpathTV.EncMapUint32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int:
+ fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int:
+ fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int8:
+ fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int8:
+ fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int16:
+ fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int16:
+ fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int32:
+ fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int32:
+ fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int64:
+ fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int64:
+ fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]float32:
+ fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]float32:
+ fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]float64:
+ fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]float64:
+ fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]bool:
+ fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]bool:
+ fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uint64:
+ fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e)
+ case *[]uint64:
+ fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]string:
+ fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]string:
+ fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint:
+ fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint:
+ fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint16:
+ fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint16:
+ fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint32:
+ fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint32:
+ fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uintptr:
+ fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uintptr:
+ fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int:
+ fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int:
+ fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int8:
+ fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int8:
+ fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int16:
+ fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int16:
+ fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int64:
+ fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int64:
+ fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]float32:
+ fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]float32:
+ fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uintptr:
+ fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e)
+ case *[]uintptr:
+ fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]interface{}:
+ fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]interface{}:
+ fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]string:
+ fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]string:
+ fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint:
+ fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint:
+ fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint8:
+ fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint8:
+ fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint16:
+ fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint16:
+ fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint32:
+ fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint32:
+ fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint64:
+ fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint64:
+ fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uintptr:
+ fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uintptr:
+ fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int:
+ fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int:
+ fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int8:
+ fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int8:
+ fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int16:
+ fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int16:
+ fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int32:
+ fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int32:
+ fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int64:
+ fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int64:
+ fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]float32:
+ fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]float32:
+ fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]float64:
+ fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]float64:
+ fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]bool:
+ fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]bool:
+ fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int:
+ fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e)
+ case *[]int:
+ fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e)
+ case *map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]string:
+ fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e)
+ case *map[int]string:
+ fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint:
+ fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e)
+ case *map[int]uint:
+ fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint8:
+ fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint8:
+ fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint16:
+ fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint16:
+ fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint32:
+ fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint32:
+ fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint64:
+ fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint64:
+ fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uintptr:
+ fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int]uintptr:
+ fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int:
+ fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e)
+ case *map[int]int:
+ fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int8:
+ fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e)
+ case *map[int]int8:
+ fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int16:
+ fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e)
+ case *map[int]int16:
+ fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int32:
+ fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e)
+ case *map[int]int32:
+ fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int64:
+ fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e)
+ case *map[int]int64:
+ fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]float32:
+ fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[int]float32:
+ fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]float64:
+ fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[int]float64:
+ fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]bool:
+ fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e)
+ case *map[int]bool:
+ fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int8:
+ fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e)
+ case *[]int8:
+ fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]interface{}:
+ fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int8]interface{}:
+ fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]string:
+ fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e)
+ case *map[int8]string:
+ fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint:
+ fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint:
+ fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint8:
+ fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint8:
+ fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint16:
+ fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint16:
+ fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint32:
+ fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint32:
+ fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint64:
+ fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint64:
+ fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uintptr:
+ fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int8]uintptr:
+ fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int:
+ fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e)
+ case *map[int8]int:
+ fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int8:
+ fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int8:
+ fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int16:
+ fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int16:
+ fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int32:
+ fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int32:
+ fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int64:
+ fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int64:
+ fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]float32:
+ fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]float32:
+ fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]float64:
+ fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]float64:
+ fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]bool:
+ fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int8]bool:
+ fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int16:
+ fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e)
+ case *[]int16:
+ fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]interface{}:
+ fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int16]interface{}:
+ fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]string:
+ fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e)
+ case *map[int16]string:
+ fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint:
+ fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint:
+ fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint8:
+ fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint8:
+ fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint16:
+ fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint16:
+ fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint32:
+ fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint32:
+ fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint64:
+ fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint64:
+ fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uintptr:
+ fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int16]uintptr:
+ fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int:
+ fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e)
+ case *map[int16]int:
+ fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int8:
+ fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int8:
+ fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int16:
+ fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int16:
+ fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int32:
+ fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int32:
+ fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int64:
+ fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int64:
+ fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]float32:
+ fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]float32:
+ fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]float64:
+ fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]float64:
+ fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]bool:
+ fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int16]bool:
+ fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int32:
+ fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e)
+ case *[]int32:
+ fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]string:
+ fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e)
+ case *map[int32]string:
+ fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint:
+ fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint:
+ fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint16:
+ fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint16:
+ fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint32:
+ fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint32:
+ fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uintptr:
+ fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int32]uintptr:
+ fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int:
+ fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e)
+ case *map[int32]int:
+ fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int8:
+ fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int8:
+ fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int16:
+ fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int16:
+ fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int64:
+ fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int64:
+ fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]float32:
+ fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]float32:
+ fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int64:
+ fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e)
+ case *[]int64:
+ fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]interface{}:
+ fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int64]interface{}:
+ fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]string:
+ fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e)
+ case *map[int64]string:
+ fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint:
+ fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint:
+ fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint8:
+ fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint8:
+ fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint16:
+ fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint16:
+ fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint32:
+ fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint32:
+ fastpathTV.EncMapInt64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint64:
+ fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint64:
+ fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uintptr:
+ fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int64]uintptr:
+ fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int:
+ fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e)
+ case *map[int64]int:
+ fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int8:
+ fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int8:
+ fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int16:
+ fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int16:
+ fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int32:
+ fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int32:
+ fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int64:
+ fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int64:
+ fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]float32:
+ fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]float32:
+ fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]float64:
+ fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]float64:
+ fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]bool:
+ fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int64]bool:
+ fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []bool:
+ fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e)
+ case *[]bool:
+ fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]interface{}:
+ fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e)
+ case *map[bool]interface{}:
+ fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]string:
+ fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e)
+ case *map[bool]string:
+ fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint:
+ fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint:
+ fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint8:
+ fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint8:
+ fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint16:
+ fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint16:
+ fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint32:
+ fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint32:
+ fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint64:
+ fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint64:
+ fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uintptr:
+ fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[bool]uintptr:
+ fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int:
+ fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e)
+ case *map[bool]int:
+ fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int8:
+ fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int8:
+ fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int16:
+ fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int16:
+ fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int32:
+ fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int32:
+ fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int64:
+ fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int64:
+ fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]float32:
+ fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]float32:
+ fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]float64:
+ fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]float64:
+ fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]bool:
+ fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e)
+ case *map[bool]bool:
+ fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e)
+
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+
+ case []interface{}:
+ fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e)
+ case *[]interface{}:
+ fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e)
+
+ case []string:
+ fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e)
+ case *[]string:
+ fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e)
+
+ case []float32:
+ fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e)
+ case *[]float32:
+ fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case []float64:
+ fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e)
+ case *[]float64:
+ fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case []uint:
+ fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e)
+ case *[]uint:
+ fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e)
+
+ case []uint16:
+ fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e)
+ case *[]uint16:
+ fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e)
+
+ case []uint32:
+ fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e)
+ case *[]uint32:
+ fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e)
+
+ case []uint64:
+ fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e)
+ case *[]uint64:
+ fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e)
+
+ case []uintptr:
+ fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e)
+ case *[]uintptr:
+ fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case []int:
+ fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e)
+ case *[]int:
+ fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e)
+
+ case []int8:
+ fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e)
+ case *[]int8:
+ fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e)
+
+ case []int16:
+ fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e)
+ case *[]int16:
+ fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e)
+
+ case []int32:
+ fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e)
+ case *[]int32:
+ fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e)
+
+ case []int64:
+ fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e)
+ case *[]int64:
+ fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e)
+
+ case []bool:
+ fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e)
+ case *[]bool:
+ fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e)
+
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+
+ case map[interface{}]interface{}:
+ fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]interface{}:
+ fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]string:
+ fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]string:
+ fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint:
+ fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint:
+ fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint8:
+ fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint8:
+ fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint16:
+ fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint16:
+ fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint32:
+ fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint32:
+ fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint64:
+ fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint64:
+ fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uintptr:
+ fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uintptr:
+ fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int:
+ fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int:
+ fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int8:
+ fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int8:
+ fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int16:
+ fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int16:
+ fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int32:
+ fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int32:
+ fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int64:
+ fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int64:
+ fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]float32:
+ fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]float32:
+ fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]float64:
+ fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]float64:
+ fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]bool:
+ fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]bool:
+ fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e)
+ case *map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]string:
+ fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e)
+ case *map[string]string:
+ fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint:
+ fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e)
+ case *map[string]uint:
+ fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint8:
+ fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint8:
+ fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint16:
+ fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint16:
+ fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint32:
+ fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint32:
+ fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint64:
+ fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint64:
+ fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uintptr:
+ fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[string]uintptr:
+ fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int:
+ fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e)
+ case *map[string]int:
+ fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int8:
+ fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e)
+ case *map[string]int8:
+ fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int16:
+ fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e)
+ case *map[string]int16:
+ fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int32:
+ fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e)
+ case *map[string]int32:
+ fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int64:
+ fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e)
+ case *map[string]int64:
+ fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]float32:
+ fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[string]float32:
+ fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]float64:
+ fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[string]float64:
+ fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]bool:
+ fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e)
+ case *map[string]bool:
+ fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]interface{}:
+ fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[float32]interface{}:
+ fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]string:
+ fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e)
+ case *map[float32]string:
+ fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint:
+ fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint:
+ fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint8:
+ fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint8:
+ fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint16:
+ fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint16:
+ fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint32:
+ fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint32:
+ fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint64:
+ fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint64:
+ fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uintptr:
+ fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[float32]uintptr:
+ fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int:
+ fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e)
+ case *map[float32]int:
+ fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int8:
+ fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int8:
+ fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int16:
+ fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int16:
+ fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int32:
+ fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int32:
+ fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int64:
+ fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int64:
+ fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]float32:
+ fastpathTV.EncMapFloat32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]float32:
+ fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]float64:
+ fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]float64:
+ fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]bool:
+ fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[float32]bool:
+ fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]interface{}:
+ fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[float64]interface{}:
+ fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]string:
+ fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e)
+ case *map[float64]string:
+ fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint:
+ fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint:
+ fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint8:
+ fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint8:
+ fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint16:
+ fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint16:
+ fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint32:
+ fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint32:
+ fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint64:
+ fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint64:
+ fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uintptr:
+ fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[float64]uintptr:
+ fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int:
+ fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e)
+ case *map[float64]int:
+ fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int8:
+ fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int8:
+ fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int16:
+ fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int16:
+ fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int32:
+ fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int32:
+ fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int64:
+ fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int64:
+ fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]float32:
+ fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]float32:
+ fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]float64:
+ fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]float64:
+ fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]bool:
+ fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[float64]bool:
+ fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]interface{}:
+ fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint]interface{}:
+ fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]string:
+ fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e)
+ case *map[uint]string:
+ fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint:
+ fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint:
+ fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint8:
+ fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint8:
+ fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint16:
+ fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint16:
+ fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint32:
+ fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint32:
+ fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint64:
+ fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint64:
+ fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uintptr:
+ fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint]uintptr:
+ fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int:
+ fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e)
+ case *map[uint]int:
+ fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int8:
+ fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int8:
+ fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int16:
+ fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int16:
+ fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int32:
+ fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int32:
+ fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int64:
+ fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int64:
+ fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]float32:
+ fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]float32:
+ fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]float64:
+ fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]float64:
+ fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]bool:
+ fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint]bool:
+ fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]string:
+ fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]string:
+ fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint:
+ fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint:
+ fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint16:
+ fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint16:
+ fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint32:
+ fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint32:
+ fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uintptr:
+ fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uintptr:
+ fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int:
+ fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int:
+ fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int8:
+ fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int8:
+ fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int16:
+ fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int16:
+ fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int64:
+ fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int64:
+ fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]float32:
+ fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]float32:
+ fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]interface{}:
+ fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]interface{}:
+ fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]string:
+ fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]string:
+ fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint:
+ fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint:
+ fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint8:
+ fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint8:
+ fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint16:
+ fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint16:
+ fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint32:
+ fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint32:
+ fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint64:
+ fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint64:
+ fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uintptr:
+ fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uintptr:
+ fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int:
+ fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int:
+ fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int8:
+ fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int8:
+ fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int16:
+ fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int16:
+ fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int32:
+ fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int32:
+ fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int64:
+ fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int64:
+ fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]float32:
+ fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]float32:
+ fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]float64:
+ fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]float64:
+ fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]bool:
+ fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]bool:
+ fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]interface{}:
+ fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]interface{}:
+ fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]string:
+ fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]string:
+ fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint:
+ fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint:
+ fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint8:
+ fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint8:
+ fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint16:
+ fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint16:
+ fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint32:
+ fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint32:
+ fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint64:
+ fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint64:
+ fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uintptr:
+ fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uintptr:
+ fastpathTV.EncMapUint32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int:
+ fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int:
+ fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int8:
+ fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int8:
+ fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int16:
+ fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int16:
+ fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int32:
+ fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int32:
+ fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int64:
+ fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int64:
+ fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]float32:
+ fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]float32:
+ fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]float64:
+ fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]float64:
+ fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]bool:
+ fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]bool:
+ fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]string:
+ fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]string:
+ fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint:
+ fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint:
+ fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint16:
+ fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint16:
+ fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint32:
+ fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint32:
+ fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uintptr:
+ fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uintptr:
+ fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int:
+ fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int:
+ fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int8:
+ fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int8:
+ fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int16:
+ fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int16:
+ fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int64:
+ fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int64:
+ fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]float32:
+ fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]float32:
+ fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]interface{}:
+ fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]interface{}:
+ fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]string:
+ fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]string:
+ fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint:
+ fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint:
+ fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint8:
+ fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint8:
+ fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint16:
+ fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint16:
+ fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint32:
+ fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint32:
+ fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint64:
+ fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint64:
+ fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uintptr:
+ fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uintptr:
+ fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int:
+ fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int:
+ fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int8:
+ fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int8:
+ fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int16:
+ fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int16:
+ fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int32:
+ fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int32:
+ fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int64:
+ fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int64:
+ fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]float32:
+ fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]float32:
+ fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]float64:
+ fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]float64:
+ fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]bool:
+ fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]bool:
+ fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e)
+ case *map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]string:
+ fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e)
+ case *map[int]string:
+ fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint:
+ fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e)
+ case *map[int]uint:
+ fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint8:
+ fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint8:
+ fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint16:
+ fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint16:
+ fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint32:
+ fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint32:
+ fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint64:
+ fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint64:
+ fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uintptr:
+ fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int]uintptr:
+ fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int:
+ fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e)
+ case *map[int]int:
+ fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int8:
+ fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e)
+ case *map[int]int8:
+ fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int16:
+ fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e)
+ case *map[int]int16:
+ fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int32:
+ fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e)
+ case *map[int]int32:
+ fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int64:
+ fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e)
+ case *map[int]int64:
+ fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]float32:
+ fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[int]float32:
+ fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]float64:
+ fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[int]float64:
+ fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]bool:
+ fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e)
+ case *map[int]bool:
+ fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]interface{}:
+ fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int8]interface{}:
+ fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]string:
+ fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e)
+ case *map[int8]string:
+ fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint:
+ fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint:
+ fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint8:
+ fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint8:
+ fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint16:
+ fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint16:
+ fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint32:
+ fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint32:
+ fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint64:
+ fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint64:
+ fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uintptr:
+ fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int8]uintptr:
+ fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int:
+ fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e)
+ case *map[int8]int:
+ fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int8:
+ fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int8:
+ fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int16:
+ fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int16:
+ fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int32:
+ fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int32:
+ fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int64:
+ fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int64:
+ fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]float32:
+ fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]float32:
+ fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]float64:
+ fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]float64:
+ fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]bool:
+ fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int8]bool:
+ fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]interface{}:
+ fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int16]interface{}:
+ fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]string:
+ fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e)
+ case *map[int16]string:
+ fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint:
+ fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint:
+ fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint8:
+ fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint8:
+ fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint16:
+ fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint16:
+ fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint32:
+ fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint32:
+ fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint64:
+ fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint64:
+ fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uintptr:
+ fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int16]uintptr:
+ fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int:
+ fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e)
+ case *map[int16]int:
+ fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int8:
+ fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int8:
+ fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int16:
+ fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int16:
+ fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int32:
+ fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int32:
+ fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int64:
+ fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int64:
+ fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]float32:
+ fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]float32:
+ fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]float64:
+ fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]float64:
+ fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]bool:
+ fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int16]bool:
+ fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]string:
+ fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e)
+ case *map[int32]string:
+ fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint:
+ fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint:
+ fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint16:
+ fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint16:
+ fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint32:
+ fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint32:
+ fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uintptr:
+ fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int32]uintptr:
+ fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int:
+ fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e)
+ case *map[int32]int:
+ fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int8:
+ fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int8:
+ fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int16:
+ fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int16:
+ fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int64:
+ fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int64:
+ fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]float32:
+ fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]float32:
+ fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]interface{}:
+ fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int64]interface{}:
+ fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]string:
+ fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e)
+ case *map[int64]string:
+ fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint:
+ fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint:
+ fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint8:
+ fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint8:
+ fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint16:
+ fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint16:
+ fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint32:
+ fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint32:
+ fastpathTV.EncMapInt64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint64:
+ fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint64:
+ fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uintptr:
+ fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int64]uintptr:
+ fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int:
+ fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e)
+ case *map[int64]int:
+ fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int8:
+ fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int8:
+ fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int16:
+ fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int16:
+ fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int32:
+ fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int32:
+ fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int64:
+ fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int64:
+ fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]float32:
+ fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]float32:
+ fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]float64:
+ fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]float64:
+ fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]bool:
+ fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int64]bool:
+ fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]interface{}:
+ fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e)
+ case *map[bool]interface{}:
+ fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]string:
+ fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e)
+ case *map[bool]string:
+ fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint:
+ fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint:
+ fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint8:
+ fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint8:
+ fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint16:
+ fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint16:
+ fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint32:
+ fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint32:
+ fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint64:
+ fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint64:
+ fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uintptr:
+ fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[bool]uintptr:
+ fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int:
+ fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e)
+ case *map[bool]int:
+ fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int8:
+ fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int8:
+ fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int16:
+ fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int16:
+ fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int32:
+ fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int32:
+ fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int64:
+ fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int64:
+ fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]float32:
+ fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]float32:
+ fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]float64:
+ fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]float64:
+ fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]bool:
+ fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e)
+ case *map[bool]bool:
+ fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e)
+
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
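+
+// The type switch in the function above dispatches on the concrete slice/map
+// type being encoded (dereferencing the pointer cases first) and hands the
+// value to the matching generated fastpathTV.Enc*V helper with nil-checking
+// enabled, so these common types never go through reflection. The default
+// branch returns false, presumably so the caller can fall back to the generic
+// reflection-based encode path for types without a generated fast path.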
+
+// -- -- fast path functions
+
+func (f *encFnInfo) fastpathEncSliceIntfR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ e.encode(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
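+
+// EncSliceIntfV shows the typical shape of a slice fast path: emit an array
+// header sized to the slice, signal containerArrayElem before each element
+// whenever a container-state receiver (cr) is present, and close with
+// containerArrayEnd. When the type info marks the slice as map-by-slice
+// (f.ti.mbs), the fastpathEncSliceIntfR wrapper above calls the
+// EncAsMapSliceIntfV variant below instead. The same shape repeats for the
+// other element types in this file.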
+
+func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ e.encode(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
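+
+// EncAsMapSliceIntfV encodes a slice as a map ("mapBySlice"): the slice must
+// have even length, and its elements are written alternately as map keys and
+// map values, so e.g. []interface{}{"a", 1, "b", 2} would be emitted as a
+// two-entry map equivalent to {"a": 1, "b": 2}.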
+
+func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceStringV(v []string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceFloat32R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeFloat32(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeFloat64(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUintV(v []uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ e.encode(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ e.encode(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceIntV(v []int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceInt8V(v []int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceInt16V(v []int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceInt32V(v []int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceInt64V(v []int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeBool(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceBoolV(v []bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeBool(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) {
+ fastpathTV.EncMapIntfIntfV(rv.Interface().(map[interface{}]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
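+
+// For interface{}-keyed maps, Canonical mode makes the output deterministic
+// despite Go's randomized map iteration order: each key is first encoded into
+// a shared scratch buffer by a secondary encoder, the (encoded-bytes, key)
+// pairs are sorted by their byte representation, and the pre-encoded key bytes
+// are then written verbatim (e.asis) followed by the corresponding value.
+// The same pattern repeats in the other EncMapIntf*V helpers below.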
+
+func (f *encFnInfo) fastpathEncMapIntfStringR(rv reflect.Value) {
+ fastpathTV.EncMapIntfStringV(rv.Interface().(map[interface{}]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUintR(rv reflect.Value) {
+ fastpathTV.EncMapIntfUintV(rv.Interface().(map[interface{}]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUint8R(rv reflect.Value) {
+ fastpathTV.EncMapIntfUint8V(rv.Interface().(map[interface{}]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUint16R(rv reflect.Value) {
+ fastpathTV.EncMapIntfUint16V(rv.Interface().(map[interface{}]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUint32R(rv reflect.Value) {
+ fastpathTV.EncMapIntfUint32V(rv.Interface().(map[interface{}]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUint64R(rv reflect.Value) {
+ fastpathTV.EncMapIntfUint64V(rv.Interface().(map[interface{}]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapIntfUintptrV(rv.Interface().(map[interface{}]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfIntR(rv reflect.Value) {
+ fastpathTV.EncMapIntfIntV(rv.Interface().(map[interface{}]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfInt8R(rv reflect.Value) {
+ fastpathTV.EncMapIntfInt8V(rv.Interface().(map[interface{}]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfInt16R(rv reflect.Value) {
+ fastpathTV.EncMapIntfInt16V(rv.Interface().(map[interface{}]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfInt32R(rv reflect.Value) {
+ fastpathTV.EncMapIntfInt32V(rv.Interface().(map[interface{}]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfInt64R(rv reflect.Value) {
+ fastpathTV.EncMapIntfInt64V(rv.Interface().(map[interface{}]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapIntfFloat32V(rv.Interface().(map[interface{}]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapIntfFloat64V(rv.Interface().(map[interface{}]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfBoolR(rv reflect.Value) {
+ fastpathTV.EncMapIntfBoolV(rv.Interface().(map[interface{}]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringIntfR(rv reflect.Value) {
+ fastpathTV.EncMapStringIntfV(rv.Interface().(map[string]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
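+
+// String-keyed maps add two twists: keys may be written as symbols
+// (ee.EncodeSymbol) when the handle's AsSymbols setting has the
+// map-string-keys flag set, and Canonical mode sorts the keys directly as
+// strings rather than by their encoded bytes. The remaining EncMapString*V
+// helpers below follow the same pattern, differing only in how each value is
+// written.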
+
+func (f *encFnInfo) fastpathEncMapStringStringR(rv reflect.Value) {
+ fastpathTV.EncMapStringStringV(rv.Interface().(map[string]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringStringV(v map[string]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUintR(rv reflect.Value) {
+ fastpathTV.EncMapStringUintV(rv.Interface().(map[string]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUintV(v map[string]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUint8R(rv reflect.Value) {
+ fastpathTV.EncMapStringUint8V(rv.Interface().(map[string]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUint16R(rv reflect.Value) {
+ fastpathTV.EncMapStringUint16V(rv.Interface().(map[string]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUint32R(rv reflect.Value) {
+ fastpathTV.EncMapStringUint32V(rv.Interface().(map[string]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUint64R(rv reflect.Value) {
+ fastpathTV.EncMapStringUint64V(rv.Interface().(map[string]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapStringUintptrV(rv.Interface().(map[string]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringIntR(rv reflect.Value) {
+ fastpathTV.EncMapStringIntV(rv.Interface().(map[string]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringIntV(v map[string]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringInt8R(rv reflect.Value) {
+ fastpathTV.EncMapStringInt8V(rv.Interface().(map[string]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringInt8V(v map[string]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringInt16R(rv reflect.Value) {
+ fastpathTV.EncMapStringInt16V(rv.Interface().(map[string]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringInt16V(v map[string]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringInt32R(rv reflect.Value) {
+ fastpathTV.EncMapStringInt32V(rv.Interface().(map[string]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringInt32V(v map[string]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringInt64R(rv reflect.Value) {
+ fastpathTV.EncMapStringInt64V(rv.Interface().(map[string]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringInt64V(v map[string]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapStringFloat32V(rv.Interface().(map[string]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapStringFloat64V(rv.Interface().(map[string]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringBoolR(rv reflect.Value) {
+ fastpathTV.EncMapStringBoolV(rv.Interface().(map[string]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringBoolV(v map[string]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32IntfR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32IntfV(rv.Interface().(map[float32]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32StringR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32StringV(rv.Interface().(map[float32]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32UintR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32UintV(rv.Interface().(map[float32]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint8V(rv.Interface().(map[float32]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint16V(rv.Interface().(map[float32]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint32V(rv.Interface().(map[float32]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint64V(rv.Interface().(map[float32]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32UintptrV(rv.Interface().(map[float32]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32IntR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32IntV(rv.Interface().(map[float32]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Int8R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int8V(rv.Interface().(map[float32]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Int16R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int16V(rv.Interface().(map[float32]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Int32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int32V(rv.Interface().(map[float32]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Int64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int64V(rv.Interface().(map[float32]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Float32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Float32V(rv.Interface().(map[float32]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Float64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Float64V(rv.Interface().(map[float32]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32BoolR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32BoolV(rv.Interface().(map[float32]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64IntfR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64IntfV(rv.Interface().(map[float64]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64StringR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64StringV(rv.Interface().(map[float64]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64UintR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64UintV(rv.Interface().(map[float64]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint8V(rv.Interface().(map[float64]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint16V(rv.Interface().(map[float64]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint32V(rv.Interface().(map[float64]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint64V(rv.Interface().(map[float64]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64UintptrV(rv.Interface().(map[float64]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64IntR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64IntV(rv.Interface().(map[float64]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Int8R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int8V(rv.Interface().(map[float64]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Int16R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int16V(rv.Interface().(map[float64]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Int32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int32V(rv.Interface().(map[float64]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Int64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int64V(rv.Interface().(map[float64]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Float32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Float32V(rv.Interface().(map[float64]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Float64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Float64V(rv.Interface().(map[float64]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64BoolR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64BoolV(rv.Interface().(map[float64]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintIntfR(rv reflect.Value) {
+ fastpathTV.EncMapUintIntfV(rv.Interface().(map[uint]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintStringR(rv reflect.Value) {
+ fastpathTV.EncMapUintStringV(rv.Interface().(map[uint]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintStringV(v map[uint]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUintR(rv reflect.Value) {
+ fastpathTV.EncMapUintUintV(rv.Interface().(map[uint]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUintV(v map[uint]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUint8R(rv reflect.Value) {
+ fastpathTV.EncMapUintUint8V(rv.Interface().(map[uint]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUint16R(rv reflect.Value) {
+ fastpathTV.EncMapUintUint16V(rv.Interface().(map[uint]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUint32R(rv reflect.Value) {
+ fastpathTV.EncMapUintUint32V(rv.Interface().(map[uint]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUint64R(rv reflect.Value) {
+ fastpathTV.EncMapUintUint64V(rv.Interface().(map[uint]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUintUintptrV(rv.Interface().(map[uint]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintIntR(rv reflect.Value) {
+ fastpathTV.EncMapUintIntV(rv.Interface().(map[uint]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintIntV(v map[uint]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintInt8R(rv reflect.Value) {
+ fastpathTV.EncMapUintInt8V(rv.Interface().(map[uint]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintInt16R(rv reflect.Value) {
+ fastpathTV.EncMapUintInt16V(rv.Interface().(map[uint]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintInt32R(rv reflect.Value) {
+ fastpathTV.EncMapUintInt32V(rv.Interface().(map[uint]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintInt64R(rv reflect.Value) {
+ fastpathTV.EncMapUintInt64V(rv.Interface().(map[uint]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapUintFloat32V(rv.Interface().(map[uint]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapUintFloat64V(rv.Interface().(map[uint]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintBoolR(rv reflect.Value) {
+ fastpathTV.EncMapUintBoolV(rv.Interface().(map[uint]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8IntfR(rv reflect.Value) {
+ fastpathTV.EncMapUint8IntfV(rv.Interface().(map[uint8]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8StringR(rv reflect.Value) {
+ fastpathTV.EncMapUint8StringV(rv.Interface().(map[uint8]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8UintR(rv reflect.Value) {
+ fastpathTV.EncMapUint8UintV(rv.Interface().(map[uint8]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint8V(rv.Interface().(map[uint8]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint16V(rv.Interface().(map[uint8]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint32V(rv.Interface().(map[uint8]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint64V(rv.Interface().(map[uint8]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUint8UintptrV(rv.Interface().(map[uint8]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8IntR(rv reflect.Value) {
+ fastpathTV.EncMapUint8IntV(rv.Interface().(map[uint8]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Int8R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Int8V(rv.Interface().(map[uint8]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Int16R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Int16V(rv.Interface().(map[uint8]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Int32R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Int32V(rv.Interface().(map[uint8]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Int64R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Int64V(rv.Interface().(map[uint8]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Float32R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Float32V(rv.Interface().(map[uint8]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Float64R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Float64V(rv.Interface().(map[uint8]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8BoolR(rv reflect.Value) {
+ fastpathTV.EncMapUint8BoolV(rv.Interface().(map[uint8]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16IntfR(rv reflect.Value) {
+ fastpathTV.EncMapUint16IntfV(rv.Interface().(map[uint16]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16StringR(rv reflect.Value) {
+ fastpathTV.EncMapUint16StringV(rv.Interface().(map[uint16]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16UintR(rv reflect.Value) {
+ fastpathTV.EncMapUint16UintV(rv.Interface().(map[uint16]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint8V(rv.Interface().(map[uint16]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint16V(rv.Interface().(map[uint16]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint32V(rv.Interface().(map[uint16]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint64V(rv.Interface().(map[uint16]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUint16UintptrV(rv.Interface().(map[uint16]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16IntR(rv reflect.Value) {
+ fastpathTV.EncMapUint16IntV(rv.Interface().(map[uint16]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Int8R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Int8V(rv.Interface().(map[uint16]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Int16R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Int16V(rv.Interface().(map[uint16]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Int32R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Int32V(rv.Interface().(map[uint16]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Int64R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Int64V(rv.Interface().(map[uint16]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Float32R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Float32V(rv.Interface().(map[uint16]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Float64R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Float64V(rv.Interface().(map[uint16]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16BoolR(rv reflect.Value) {
+ fastpathTV.EncMapUint16BoolV(rv.Interface().(map[uint16]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32IntfR(rv reflect.Value) {
+ fastpathTV.EncMapUint32IntfV(rv.Interface().(map[uint32]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32StringR(rv reflect.Value) {
+ fastpathTV.EncMapUint32StringV(rv.Interface().(map[uint32]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32UintR(rv reflect.Value) {
+ fastpathTV.EncMapUint32UintV(rv.Interface().(map[uint32]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint8V(rv.Interface().(map[uint32]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint16V(rv.Interface().(map[uint32]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint32V(rv.Interface().(map[uint32]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint64V(rv.Interface().(map[uint32]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUint32UintptrV(rv.Interface().(map[uint32]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32IntR(rv reflect.Value) {
+ fastpathTV.EncMapUint32IntV(rv.Interface().(map[uint32]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Int8R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Int8V(rv.Interface().(map[uint32]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Int16R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Int16V(rv.Interface().(map[uint32]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Int32R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Int32V(rv.Interface().(map[uint32]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Int64R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Int64V(rv.Interface().(map[uint32]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Float32R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Float32V(rv.Interface().(map[uint32]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Float64R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Float64V(rv.Interface().(map[uint32]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32BoolR(rv reflect.Value) {
+ fastpathTV.EncMapUint32BoolV(rv.Interface().(map[uint32]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64IntfR(rv reflect.Value) {
+ fastpathTV.EncMapUint64IntfV(rv.Interface().(map[uint64]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64StringR(rv reflect.Value) {
+ fastpathTV.EncMapUint64StringV(rv.Interface().(map[uint64]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64UintR(rv reflect.Value) {
+ fastpathTV.EncMapUint64UintV(rv.Interface().(map[uint64]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint8V(rv.Interface().(map[uint64]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint16V(rv.Interface().(map[uint64]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint32V(rv.Interface().(map[uint64]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint64V(rv.Interface().(map[uint64]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUint64UintptrV(rv.Interface().(map[uint64]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64IntR(rv reflect.Value) {
+ fastpathTV.EncMapUint64IntV(rv.Interface().(map[uint64]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Int8R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Int8V(rv.Interface().(map[uint64]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Int16R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Int16V(rv.Interface().(map[uint64]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Int32R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Int32V(rv.Interface().(map[uint64]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Int64R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Int64V(rv.Interface().(map[uint64]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Float32R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Float32V(rv.Interface().(map[uint64]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Float64R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Float64V(rv.Interface().(map[uint64]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64BoolR(rv reflect.Value) {
+ fastpathTV.EncMapUint64BoolV(rv.Interface().(map[uint64]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrIntfR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrIntfV(rv.Interface().(map[uintptr]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrStringR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrStringV(rv.Interface().(map[uintptr]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUintR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUintV(rv.Interface().(map[uintptr]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUint8R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint8V(rv.Interface().(map[uintptr]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUint16R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint16V(rv.Interface().(map[uintptr]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUint32R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint32V(rv.Interface().(map[uintptr]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUint64R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint64V(rv.Interface().(map[uintptr]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUintptrV(rv.Interface().(map[uintptr]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrIntR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrIntV(rv.Interface().(map[uintptr]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrInt8R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt8V(rv.Interface().(map[uintptr]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrInt16R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt16V(rv.Interface().(map[uintptr]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrInt32R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt32V(rv.Interface().(map[uintptr]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrInt64R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt64V(rv.Interface().(map[uintptr]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrFloat32V(rv.Interface().(map[uintptr]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrFloat64V(rv.Interface().(map[uintptr]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrBoolR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrBoolV(rv.Interface().(map[uintptr]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntIntfR(rv reflect.Value) {
+ fastpathTV.EncMapIntIntfV(rv.Interface().(map[int]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntStringR(rv reflect.Value) {
+ fastpathTV.EncMapIntStringV(rv.Interface().(map[int]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntStringV(v map[int]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUintR(rv reflect.Value) {
+ fastpathTV.EncMapIntUintV(rv.Interface().(map[int]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUintV(v map[int]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUint8R(rv reflect.Value) {
+ fastpathTV.EncMapIntUint8V(rv.Interface().(map[int]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUint16R(rv reflect.Value) {
+ fastpathTV.EncMapIntUint16V(rv.Interface().(map[int]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUint32R(rv reflect.Value) {
+ fastpathTV.EncMapIntUint32V(rv.Interface().(map[int]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUint64R(rv reflect.Value) {
+ fastpathTV.EncMapIntUint64V(rv.Interface().(map[int]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapIntUintptrV(rv.Interface().(map[int]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntIntR(rv reflect.Value) {
+ fastpathTV.EncMapIntIntV(rv.Interface().(map[int]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntIntV(v map[int]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntInt8R(rv reflect.Value) {
+ fastpathTV.EncMapIntInt8V(rv.Interface().(map[int]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntInt8V(v map[int]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntInt16R(rv reflect.Value) {
+ fastpathTV.EncMapIntInt16V(rv.Interface().(map[int]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntInt16V(v map[int]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntInt32R(rv reflect.Value) {
+ fastpathTV.EncMapIntInt32V(rv.Interface().(map[int]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntInt32V(v map[int]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntInt64R(rv reflect.Value) {
+ fastpathTV.EncMapIntInt64V(rv.Interface().(map[int]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntInt64V(v map[int]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapIntFloat32V(rv.Interface().(map[int]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapIntFloat64V(rv.Interface().(map[int]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntBoolR(rv reflect.Value) {
+ fastpathTV.EncMapIntBoolV(rv.Interface().(map[int]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntBoolV(v map[int]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8IntfR(rv reflect.Value) {
+ fastpathTV.EncMapInt8IntfV(rv.Interface().(map[int8]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8StringR(rv reflect.Value) {
+ fastpathTV.EncMapInt8StringV(rv.Interface().(map[int8]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8StringV(v map[int8]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8UintR(rv reflect.Value) {
+ fastpathTV.EncMapInt8UintV(rv.Interface().(map[int8]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint8V(rv.Interface().(map[int8]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint16V(rv.Interface().(map[int8]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint32V(rv.Interface().(map[int8]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint64V(rv.Interface().(map[int8]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapInt8UintptrV(rv.Interface().(map[int8]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8IntR(rv reflect.Value) {
+ fastpathTV.EncMapInt8IntV(rv.Interface().(map[int8]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8IntV(v map[int8]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Int8R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Int8V(rv.Interface().(map[int8]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Int16R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Int16V(rv.Interface().(map[int8]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Int32R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Int32V(rv.Interface().(map[int8]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Int64R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Int64V(rv.Interface().(map[int8]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Float32R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Float32V(rv.Interface().(map[int8]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Float64R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Float64V(rv.Interface().(map[int8]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8BoolR(rv reflect.Value) {
+ fastpathTV.EncMapInt8BoolV(rv.Interface().(map[int8]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16IntfR(rv reflect.Value) {
+ fastpathTV.EncMapInt16IntfV(rv.Interface().(map[int16]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16StringR(rv reflect.Value) {
+ fastpathTV.EncMapInt16StringV(rv.Interface().(map[int16]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16StringV(v map[int16]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16UintR(rv reflect.Value) {
+ fastpathTV.EncMapInt16UintV(rv.Interface().(map[int16]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint8V(rv.Interface().(map[int16]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint16V(rv.Interface().(map[int16]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint32V(rv.Interface().(map[int16]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint64V(rv.Interface().(map[int16]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapInt16UintptrV(rv.Interface().(map[int16]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16IntR(rv reflect.Value) {
+ fastpathTV.EncMapInt16IntV(rv.Interface().(map[int16]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16IntV(v map[int16]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Int8R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Int8V(rv.Interface().(map[int16]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Int16R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Int16V(rv.Interface().(map[int16]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Int32R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Int32V(rv.Interface().(map[int16]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Int64R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Int64V(rv.Interface().(map[int16]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Float32R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Float32V(rv.Interface().(map[int16]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Float64R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Float64V(rv.Interface().(map[int16]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16BoolR(rv reflect.Value) {
+ fastpathTV.EncMapInt16BoolV(rv.Interface().(map[int16]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32IntfR(rv reflect.Value) {
+ fastpathTV.EncMapInt32IntfV(rv.Interface().(map[int32]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32StringR(rv reflect.Value) {
+ fastpathTV.EncMapInt32StringV(rv.Interface().(map[int32]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32StringV(v map[int32]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32UintR(rv reflect.Value) {
+ fastpathTV.EncMapInt32UintV(rv.Interface().(map[int32]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint8V(rv.Interface().(map[int32]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint16V(rv.Interface().(map[int32]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint32V(rv.Interface().(map[int32]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint64V(rv.Interface().(map[int32]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapInt32UintptrV(rv.Interface().(map[int32]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32IntR(rv reflect.Value) {
+ fastpathTV.EncMapInt32IntV(rv.Interface().(map[int32]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32IntV(v map[int32]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Int8R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Int8V(rv.Interface().(map[int32]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Int16R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Int16V(rv.Interface().(map[int32]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Int32R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Int32V(rv.Interface().(map[int32]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Int64R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Int64V(rv.Interface().(map[int32]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Float32R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Float32V(rv.Interface().(map[int32]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Float64R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Float64V(rv.Interface().(map[int32]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32BoolR(rv reflect.Value) {
+ fastpathTV.EncMapInt32BoolV(rv.Interface().(map[int32]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64IntfR(rv reflect.Value) {
+ fastpathTV.EncMapInt64IntfV(rv.Interface().(map[int64]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64StringR(rv reflect.Value) {
+ fastpathTV.EncMapInt64StringV(rv.Interface().(map[int64]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64StringV(v map[int64]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64UintR(rv reflect.Value) {
+ fastpathTV.EncMapInt64UintV(rv.Interface().(map[int64]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint8V(rv.Interface().(map[int64]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint16V(rv.Interface().(map[int64]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint32V(rv.Interface().(map[int64]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint64V(rv.Interface().(map[int64]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapInt64UintptrV(rv.Interface().(map[int64]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64IntR(rv reflect.Value) {
+ fastpathTV.EncMapInt64IntV(rv.Interface().(map[int64]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64IntV(v map[int64]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Int8R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Int8V(rv.Interface().(map[int64]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Int16R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Int16V(rv.Interface().(map[int64]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Int32R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Int32V(rv.Interface().(map[int64]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Int64R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Int64V(rv.Interface().(map[int64]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Float32R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Float32V(rv.Interface().(map[int64]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Float64R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Float64V(rv.Interface().(map[int64]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64BoolR(rv reflect.Value) {
+ fastpathTV.EncMapInt64BoolV(rv.Interface().(map[int64]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolIntfR(rv reflect.Value) {
+ fastpathTV.EncMapBoolIntfV(rv.Interface().(map[bool]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolStringR(rv reflect.Value) {
+ fastpathTV.EncMapBoolStringV(rv.Interface().(map[bool]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolStringV(v map[bool]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUintR(rv reflect.Value) {
+ fastpathTV.EncMapBoolUintV(rv.Interface().(map[bool]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUint8R(rv reflect.Value) {
+ fastpathTV.EncMapBoolUint8V(rv.Interface().(map[bool]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUint16R(rv reflect.Value) {
+ fastpathTV.EncMapBoolUint16V(rv.Interface().(map[bool]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUint32R(rv reflect.Value) {
+ fastpathTV.EncMapBoolUint32V(rv.Interface().(map[bool]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUint64R(rv reflect.Value) {
+ fastpathTV.EncMapBoolUint64V(rv.Interface().(map[bool]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapBoolUintptrV(rv.Interface().(map[bool]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
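+
+// uintptr values (EncMapBoolUintptrV above) are likewise passed to e.encode
+// rather than to ee.EncodeUint, while the sized unsigned types (uint, uint8,
+// uint16, uint32, uint64) are widened to uint64 and written directly.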
+
+func (f *encFnInfo) fastpathEncMapBoolIntR(rv reflect.Value) {
+ fastpathTV.EncMapBoolIntV(rv.Interface().(map[bool]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolIntV(v map[bool]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolInt8R(rv reflect.Value) {
+ fastpathTV.EncMapBoolInt8V(rv.Interface().(map[bool]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolInt16R(rv reflect.Value) {
+ fastpathTV.EncMapBoolInt16V(rv.Interface().(map[bool]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolInt32R(rv reflect.Value) {
+ fastpathTV.EncMapBoolInt32V(rv.Interface().(map[bool]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolInt64R(rv reflect.Value) {
+ fastpathTV.EncMapBoolInt64V(rv.Interface().(map[bool]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapBoolFloat32V(rv.Interface().(map[bool]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapBoolFloat64V(rv.Interface().(map[bool]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolBoolR(rv reflect.Value) {
+ fastpathTV.EncMapBoolBoolV(rv.Interface().(map[bool]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
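+
+// Illustrative sketch (not part of the generated fast paths): the Canonical
+// branch in the encoders above is selected through the handle an Encoder is
+// built with. The sketch assumes this package's JsonHandle, NewEncoderBytes
+// and Encoder.Encode as the entry points; with Canonical set, a map such as
+// map[bool]int is written with its keys in sorted order via the fast path
+// above (fastpathEncMapBoolIntR).
+func exampleCanonicalBoolMapEncode() ([]byte, error) {
+ var h JsonHandle
+ h.Canonical = true // keys are sorted before encoding, so output is deterministic
+ var out []byte
+ enc := NewEncoderBytes(&out, &h)
+ // map[bool]int is one of the concrete types handled by the encode fast path.
+ err := enc.Encode(map[bool]int{true: 1, false: 0})
+ return out, err
+}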
+
+// -- decode
+
+// -- -- fast path type switch
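+// The decode fast path mirrors the encode one: fastpathDecodeTypeSwitch
+// returns false when fast paths are disabled, and otherwise type-switches on
+// the supported slice and map types. Each type has two cases: the value form
+// is decoded in place, and the pointer form decodes into a copy and writes it
+// back through the pointer only when the helper reports that the container
+// changed (for example, when a nil or undersized map/slice had to be replaced).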
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+
+ case []interface{}:
+ fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, d)
+ case *[]interface{}:
+ v2, changed2 := fastpathTV.DecSliceIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]interface{}:
+ fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]interface{}:
+ v2, changed2 := fastpathTV.DecMapIntfIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]string:
+ fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]string:
+ v2, changed2 := fastpathTV.DecMapIntfStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint:
+ fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint:
+ v2, changed2 := fastpathTV.DecMapIntfUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint8:
+ fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint8:
+ v2, changed2 := fastpathTV.DecMapIntfUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint16:
+ fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint16:
+ v2, changed2 := fastpathTV.DecMapIntfUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint32:
+ fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint32:
+ v2, changed2 := fastpathTV.DecMapIntfUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint64:
+ fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint64:
+ v2, changed2 := fastpathTV.DecMapIntfUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uintptr:
+ fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uintptr:
+ v2, changed2 := fastpathTV.DecMapIntfUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int:
+ fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int:
+ v2, changed2 := fastpathTV.DecMapIntfIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int8:
+ fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int8:
+ v2, changed2 := fastpathTV.DecMapIntfInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int16:
+ fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int16:
+ v2, changed2 := fastpathTV.DecMapIntfInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int32:
+ fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int32:
+ v2, changed2 := fastpathTV.DecMapIntfInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int64:
+ fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int64:
+ v2, changed2 := fastpathTV.DecMapIntfInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]float32:
+ fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]float32:
+ v2, changed2 := fastpathTV.DecMapIntfFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]float64:
+ fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]float64:
+ v2, changed2 := fastpathTV.DecMapIntfFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]bool:
+ fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]bool:
+ v2, changed2 := fastpathTV.DecMapIntfBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []string:
+ fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, d)
+ case *[]string:
+ v2, changed2 := fastpathTV.DecSliceStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]interface{}:
+ fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]interface{}:
+ v2, changed2 := fastpathTV.DecMapStringIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]string:
+ fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]string:
+ v2, changed2 := fastpathTV.DecMapStringStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint:
+ fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint:
+ v2, changed2 := fastpathTV.DecMapStringUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint8:
+ fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint8:
+ v2, changed2 := fastpathTV.DecMapStringUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint16:
+ fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint16:
+ v2, changed2 := fastpathTV.DecMapStringUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint32:
+ fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint32:
+ v2, changed2 := fastpathTV.DecMapStringUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint64:
+ fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint64:
+ v2, changed2 := fastpathTV.DecMapStringUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uintptr:
+ fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uintptr:
+ v2, changed2 := fastpathTV.DecMapStringUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int:
+ fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int:
+ v2, changed2 := fastpathTV.DecMapStringIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int8:
+ fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int8:
+ v2, changed2 := fastpathTV.DecMapStringInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int16:
+ fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int16:
+ v2, changed2 := fastpathTV.DecMapStringInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int32:
+ fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int32:
+ v2, changed2 := fastpathTV.DecMapStringInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int64:
+ fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int64:
+ v2, changed2 := fastpathTV.DecMapStringInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]float32:
+ fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]float32:
+ v2, changed2 := fastpathTV.DecMapStringFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]float64:
+ fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]float64:
+ v2, changed2 := fastpathTV.DecMapStringFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]bool:
+ fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]bool:
+ v2, changed2 := fastpathTV.DecMapStringBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []float32:
+ fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *[]float32:
+ v2, changed2 := fastpathTV.DecSliceFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]interface{}:
+ fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]interface{}:
+ v2, changed2 := fastpathTV.DecMapFloat32IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]string:
+ fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]string:
+ v2, changed2 := fastpathTV.DecMapFloat32StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint:
+ fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint:
+ v2, changed2 := fastpathTV.DecMapFloat32UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint8:
+ fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint8:
+ v2, changed2 := fastpathTV.DecMapFloat32Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint16:
+ fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint16:
+ v2, changed2 := fastpathTV.DecMapFloat32Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint32:
+ fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint32:
+ v2, changed2 := fastpathTV.DecMapFloat32Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint64:
+ fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint64:
+ v2, changed2 := fastpathTV.DecMapFloat32Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uintptr:
+ fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uintptr:
+ v2, changed2 := fastpathTV.DecMapFloat32UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int:
+ fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int:
+ v2, changed2 := fastpathTV.DecMapFloat32IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int8:
+ fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int8:
+ v2, changed2 := fastpathTV.DecMapFloat32Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int16:
+ fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int16:
+ v2, changed2 := fastpathTV.DecMapFloat32Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int32:
+ fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int32:
+ v2, changed2 := fastpathTV.DecMapFloat32Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int64:
+ fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int64:
+ v2, changed2 := fastpathTV.DecMapFloat32Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]float32:
+ fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]float32:
+ v2, changed2 := fastpathTV.DecMapFloat32Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]float64:
+ fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]float64:
+ v2, changed2 := fastpathTV.DecMapFloat32Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]bool:
+ fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]bool:
+ v2, changed2 := fastpathTV.DecMapFloat32BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []float64:
+ fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *[]float64:
+ v2, changed2 := fastpathTV.DecSliceFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]interface{}:
+ fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]interface{}:
+ v2, changed2 := fastpathTV.DecMapFloat64IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]string:
+ fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]string:
+ v2, changed2 := fastpathTV.DecMapFloat64StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint:
+ fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint:
+ v2, changed2 := fastpathTV.DecMapFloat64UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint8:
+ fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint8:
+ v2, changed2 := fastpathTV.DecMapFloat64Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint16:
+ fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint16:
+ v2, changed2 := fastpathTV.DecMapFloat64Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint32:
+ fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint32:
+ v2, changed2 := fastpathTV.DecMapFloat64Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint64:
+ fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint64:
+ v2, changed2 := fastpathTV.DecMapFloat64Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uintptr:
+ fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uintptr:
+ v2, changed2 := fastpathTV.DecMapFloat64UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int:
+ fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int:
+ v2, changed2 := fastpathTV.DecMapFloat64IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int8:
+ fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int8:
+ v2, changed2 := fastpathTV.DecMapFloat64Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int16:
+ fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int16:
+ v2, changed2 := fastpathTV.DecMapFloat64Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int32:
+ fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int32:
+ v2, changed2 := fastpathTV.DecMapFloat64Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int64:
+ fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int64:
+ v2, changed2 := fastpathTV.DecMapFloat64Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]float32:
+ fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]float32:
+ v2, changed2 := fastpathTV.DecMapFloat64Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]float64:
+ fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]float64:
+ v2, changed2 := fastpathTV.DecMapFloat64Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]bool:
+ fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]bool:
+ v2, changed2 := fastpathTV.DecMapFloat64BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uint:
+ fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, d)
+ case *[]uint:
+ v2, changed2 := fastpathTV.DecSliceUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]interface{}:
+ fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]interface{}:
+ v2, changed2 := fastpathTV.DecMapUintIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]string:
+ fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]string:
+ v2, changed2 := fastpathTV.DecMapUintStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint:
+ fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint:
+ v2, changed2 := fastpathTV.DecMapUintUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint8:
+ fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint8:
+ v2, changed2 := fastpathTV.DecMapUintUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint16:
+ fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint16:
+ v2, changed2 := fastpathTV.DecMapUintUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint32:
+ fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint32:
+ v2, changed2 := fastpathTV.DecMapUintUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint64:
+ fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint64:
+ v2, changed2 := fastpathTV.DecMapUintUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uintptr:
+ fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uintptr:
+ v2, changed2 := fastpathTV.DecMapUintUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int:
+ fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int:
+ v2, changed2 := fastpathTV.DecMapUintIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int8:
+ fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int8:
+ v2, changed2 := fastpathTV.DecMapUintInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int16:
+ fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int16:
+ v2, changed2 := fastpathTV.DecMapUintInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int32:
+ fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int32:
+ v2, changed2 := fastpathTV.DecMapUintInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int64:
+ fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int64:
+ v2, changed2 := fastpathTV.DecMapUintInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]float32:
+ fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]float32:
+ v2, changed2 := fastpathTV.DecMapUintFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]float64:
+ fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]float64:
+ v2, changed2 := fastpathTV.DecMapUintFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]bool:
+ fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]bool:
+ v2, changed2 := fastpathTV.DecMapUintBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]interface{}:
+ fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]interface{}:
+ v2, changed2 := fastpathTV.DecMapUint8IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]string:
+ fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]string:
+ v2, changed2 := fastpathTV.DecMapUint8StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint:
+ fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint:
+ v2, changed2 := fastpathTV.DecMapUint8UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint8:
+ fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint8:
+ v2, changed2 := fastpathTV.DecMapUint8Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint16:
+ fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint16:
+ v2, changed2 := fastpathTV.DecMapUint8Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint32:
+ fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint32:
+ v2, changed2 := fastpathTV.DecMapUint8Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint64:
+ fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint64:
+ v2, changed2 := fastpathTV.DecMapUint8Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uintptr:
+ fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uintptr:
+ v2, changed2 := fastpathTV.DecMapUint8UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int:
+ fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int:
+ v2, changed2 := fastpathTV.DecMapUint8IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int8:
+ fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int8:
+ v2, changed2 := fastpathTV.DecMapUint8Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int16:
+ fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int16:
+ v2, changed2 := fastpathTV.DecMapUint8Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int32:
+ fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int32:
+ v2, changed2 := fastpathTV.DecMapUint8Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int64:
+ fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int64:
+ v2, changed2 := fastpathTV.DecMapUint8Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]float32:
+ fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]float32:
+ v2, changed2 := fastpathTV.DecMapUint8Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]float64:
+ fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]float64:
+ v2, changed2 := fastpathTV.DecMapUint8Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]bool:
+ fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]bool:
+ v2, changed2 := fastpathTV.DecMapUint8BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uint16:
+ fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, d)
+ case *[]uint16:
+ v2, changed2 := fastpathTV.DecSliceUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]interface{}:
+ fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]interface{}:
+ v2, changed2 := fastpathTV.DecMapUint16IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]string:
+ fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]string:
+ v2, changed2 := fastpathTV.DecMapUint16StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint:
+ fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint:
+ v2, changed2 := fastpathTV.DecMapUint16UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint8:
+ fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint8:
+ v2, changed2 := fastpathTV.DecMapUint16Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint16:
+ fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint16:
+ v2, changed2 := fastpathTV.DecMapUint16Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint32:
+ fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint32:
+ v2, changed2 := fastpathTV.DecMapUint16Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint64:
+ fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint64:
+ v2, changed2 := fastpathTV.DecMapUint16Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uintptr:
+ fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uintptr:
+ v2, changed2 := fastpathTV.DecMapUint16UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int:
+ fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int:
+ v2, changed2 := fastpathTV.DecMapUint16IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int8:
+ fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int8:
+ v2, changed2 := fastpathTV.DecMapUint16Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int16:
+ fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int16:
+ v2, changed2 := fastpathTV.DecMapUint16Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int32:
+ fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int32:
+ v2, changed2 := fastpathTV.DecMapUint16Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int64:
+ fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int64:
+ v2, changed2 := fastpathTV.DecMapUint16Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]float32:
+ fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]float32:
+ v2, changed2 := fastpathTV.DecMapUint16Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]float64:
+ fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]float64:
+ v2, changed2 := fastpathTV.DecMapUint16Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]bool:
+ fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]bool:
+ v2, changed2 := fastpathTV.DecMapUint16BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uint32:
+ fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, d)
+ case *[]uint32:
+ v2, changed2 := fastpathTV.DecSliceUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]interface{}:
+ fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]interface{}:
+ v2, changed2 := fastpathTV.DecMapUint32IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]string:
+ fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]string:
+ v2, changed2 := fastpathTV.DecMapUint32StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint:
+ fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint:
+ v2, changed2 := fastpathTV.DecMapUint32UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint8:
+ fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint8:
+ v2, changed2 := fastpathTV.DecMapUint32Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint16:
+ fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint16:
+ v2, changed2 := fastpathTV.DecMapUint32Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint32:
+ fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint32:
+ v2, changed2 := fastpathTV.DecMapUint32Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint64:
+ fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint64:
+ v2, changed2 := fastpathTV.DecMapUint32Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uintptr:
+ fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uintptr:
+ v2, changed2 := fastpathTV.DecMapUint32UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int:
+ fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int:
+ v2, changed2 := fastpathTV.DecMapUint32IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int8:
+ fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int8:
+ v2, changed2 := fastpathTV.DecMapUint32Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int16:
+ fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int16:
+ v2, changed2 := fastpathTV.DecMapUint32Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int32:
+ fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int32:
+ v2, changed2 := fastpathTV.DecMapUint32Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int64:
+ fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int64:
+ v2, changed2 := fastpathTV.DecMapUint32Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]float32:
+ fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]float32:
+ v2, changed2 := fastpathTV.DecMapUint32Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]float64:
+ fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]float64:
+ v2, changed2 := fastpathTV.DecMapUint32Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]bool:
+ fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]bool:
+ v2, changed2 := fastpathTV.DecMapUint32BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uint64:
+ fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, d)
+ case *[]uint64:
+ v2, changed2 := fastpathTV.DecSliceUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]interface{}:
+ fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]interface{}:
+ v2, changed2 := fastpathTV.DecMapUint64IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]string:
+ fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]string:
+ v2, changed2 := fastpathTV.DecMapUint64StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint:
+ fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint:
+ v2, changed2 := fastpathTV.DecMapUint64UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint8:
+ fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint8:
+ v2, changed2 := fastpathTV.DecMapUint64Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint16:
+ fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint16:
+ v2, changed2 := fastpathTV.DecMapUint64Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint32:
+ fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint32:
+ v2, changed2 := fastpathTV.DecMapUint64Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint64:
+ fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint64:
+ v2, changed2 := fastpathTV.DecMapUint64Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uintptr:
+ fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uintptr:
+ v2, changed2 := fastpathTV.DecMapUint64UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int:
+ fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int:
+ v2, changed2 := fastpathTV.DecMapUint64IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int8:
+ fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int8:
+ v2, changed2 := fastpathTV.DecMapUint64Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int16:
+ fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int16:
+ v2, changed2 := fastpathTV.DecMapUint64Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int32:
+ fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int32:
+ v2, changed2 := fastpathTV.DecMapUint64Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int64:
+ fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int64:
+ v2, changed2 := fastpathTV.DecMapUint64Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]float32:
+ fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]float32:
+ v2, changed2 := fastpathTV.DecMapUint64Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]float64:
+ fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]float64:
+ v2, changed2 := fastpathTV.DecMapUint64Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]bool:
+ fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]bool:
+ v2, changed2 := fastpathTV.DecMapUint64BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uintptr:
+ fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *[]uintptr:
+ v2, changed2 := fastpathTV.DecSliceUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]interface{}:
+ fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]interface{}:
+ v2, changed2 := fastpathTV.DecMapUintptrIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]string:
+ fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]string:
+ v2, changed2 := fastpathTV.DecMapUintptrStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint:
+ fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint:
+ v2, changed2 := fastpathTV.DecMapUintptrUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint8:
+ fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint8:
+ v2, changed2 := fastpathTV.DecMapUintptrUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint16:
+ fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint16:
+ v2, changed2 := fastpathTV.DecMapUintptrUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint32:
+ fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint32:
+ v2, changed2 := fastpathTV.DecMapUintptrUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint64:
+ fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint64:
+ v2, changed2 := fastpathTV.DecMapUintptrUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uintptr:
+ fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uintptr:
+ v2, changed2 := fastpathTV.DecMapUintptrUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int:
+ fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int:
+ v2, changed2 := fastpathTV.DecMapUintptrIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int8:
+ fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int8:
+ v2, changed2 := fastpathTV.DecMapUintptrInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int16:
+ fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int16:
+ v2, changed2 := fastpathTV.DecMapUintptrInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int32:
+ fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int32:
+ v2, changed2 := fastpathTV.DecMapUintptrInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int64:
+ fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int64:
+ v2, changed2 := fastpathTV.DecMapUintptrInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]float32:
+ fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]float32:
+ v2, changed2 := fastpathTV.DecMapUintptrFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]float64:
+ fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]float64:
+ v2, changed2 := fastpathTV.DecMapUintptrFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]bool:
+ fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]bool:
+ v2, changed2 := fastpathTV.DecMapUintptrBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int:
+ fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, d)
+ case *[]int:
+ v2, changed2 := fastpathTV.DecSliceIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]interface{}:
+ fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]interface{}:
+ v2, changed2 := fastpathTV.DecMapIntIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]string:
+ fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]string:
+ v2, changed2 := fastpathTV.DecMapIntStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint:
+ fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint:
+ v2, changed2 := fastpathTV.DecMapIntUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint8:
+ fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint8:
+ v2, changed2 := fastpathTV.DecMapIntUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint16:
+ fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint16:
+ v2, changed2 := fastpathTV.DecMapIntUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint32:
+ fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint32:
+ v2, changed2 := fastpathTV.DecMapIntUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint64:
+ fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint64:
+ v2, changed2 := fastpathTV.DecMapIntUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uintptr:
+ fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uintptr:
+ v2, changed2 := fastpathTV.DecMapIntUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int:
+ fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int:
+ v2, changed2 := fastpathTV.DecMapIntIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int8:
+ fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int8:
+ v2, changed2 := fastpathTV.DecMapIntInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int16:
+ fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int16:
+ v2, changed2 := fastpathTV.DecMapIntInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int32:
+ fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int32:
+ v2, changed2 := fastpathTV.DecMapIntInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int64:
+ fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int64:
+ v2, changed2 := fastpathTV.DecMapIntInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]float32:
+ fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]float32:
+ v2, changed2 := fastpathTV.DecMapIntFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]float64:
+ fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]float64:
+ v2, changed2 := fastpathTV.DecMapIntFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]bool:
+ fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]bool:
+ v2, changed2 := fastpathTV.DecMapIntBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int8:
+ fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, d)
+ case *[]int8:
+ v2, changed2 := fastpathTV.DecSliceInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]interface{}:
+ fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]interface{}:
+ v2, changed2 := fastpathTV.DecMapInt8IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]string:
+ fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]string:
+ v2, changed2 := fastpathTV.DecMapInt8StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint:
+ fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint:
+ v2, changed2 := fastpathTV.DecMapInt8UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint8:
+ fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint8:
+ v2, changed2 := fastpathTV.DecMapInt8Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint16:
+ fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint16:
+ v2, changed2 := fastpathTV.DecMapInt8Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint32:
+ fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint32:
+ v2, changed2 := fastpathTV.DecMapInt8Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint64:
+ fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint64:
+ v2, changed2 := fastpathTV.DecMapInt8Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uintptr:
+ fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uintptr:
+ v2, changed2 := fastpathTV.DecMapInt8UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int:
+ fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int:
+ v2, changed2 := fastpathTV.DecMapInt8IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int8:
+ fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int8:
+ v2, changed2 := fastpathTV.DecMapInt8Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int16:
+ fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int16:
+ v2, changed2 := fastpathTV.DecMapInt8Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int32:
+ fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int32:
+ v2, changed2 := fastpathTV.DecMapInt8Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int64:
+ fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int64:
+ v2, changed2 := fastpathTV.DecMapInt8Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]float32:
+ fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]float32:
+ v2, changed2 := fastpathTV.DecMapInt8Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]float64:
+ fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]float64:
+ v2, changed2 := fastpathTV.DecMapInt8Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]bool:
+ fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]bool:
+ v2, changed2 := fastpathTV.DecMapInt8BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int16:
+ fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, d)
+ case *[]int16:
+ v2, changed2 := fastpathTV.DecSliceInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]interface{}:
+ fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]interface{}:
+ v2, changed2 := fastpathTV.DecMapInt16IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]string:
+ fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]string:
+ v2, changed2 := fastpathTV.DecMapInt16StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint:
+ fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint:
+ v2, changed2 := fastpathTV.DecMapInt16UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint8:
+ fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint8:
+ v2, changed2 := fastpathTV.DecMapInt16Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint16:
+ fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint16:
+ v2, changed2 := fastpathTV.DecMapInt16Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint32:
+ fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint32:
+ v2, changed2 := fastpathTV.DecMapInt16Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint64:
+ fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint64:
+ v2, changed2 := fastpathTV.DecMapInt16Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uintptr:
+ fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uintptr:
+ v2, changed2 := fastpathTV.DecMapInt16UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int:
+ fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int:
+ v2, changed2 := fastpathTV.DecMapInt16IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int8:
+ fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int8:
+ v2, changed2 := fastpathTV.DecMapInt16Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int16:
+ fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int16:
+ v2, changed2 := fastpathTV.DecMapInt16Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int32:
+ fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int32:
+ v2, changed2 := fastpathTV.DecMapInt16Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int64:
+ fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int64:
+ v2, changed2 := fastpathTV.DecMapInt16Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]float32:
+ fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]float32:
+ v2, changed2 := fastpathTV.DecMapInt16Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]float64:
+ fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]float64:
+ v2, changed2 := fastpathTV.DecMapInt16Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]bool:
+ fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]bool:
+ v2, changed2 := fastpathTV.DecMapInt16BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int32:
+ fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, d)
+ case *[]int32:
+ v2, changed2 := fastpathTV.DecSliceInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]interface{}:
+ fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]interface{}:
+ v2, changed2 := fastpathTV.DecMapInt32IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]string:
+ fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]string:
+ v2, changed2 := fastpathTV.DecMapInt32StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint:
+ fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint:
+ v2, changed2 := fastpathTV.DecMapInt32UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint8:
+ fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint8:
+ v2, changed2 := fastpathTV.DecMapInt32Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint16:
+ fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint16:
+ v2, changed2 := fastpathTV.DecMapInt32Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint32:
+ fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint32:
+ v2, changed2 := fastpathTV.DecMapInt32Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint64:
+ fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint64:
+ v2, changed2 := fastpathTV.DecMapInt32Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uintptr:
+ fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uintptr:
+ v2, changed2 := fastpathTV.DecMapInt32UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int:
+ fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int:
+ v2, changed2 := fastpathTV.DecMapInt32IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int8:
+ fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int8:
+ v2, changed2 := fastpathTV.DecMapInt32Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int16:
+ fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int16:
+ v2, changed2 := fastpathTV.DecMapInt32Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int32:
+ fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int32:
+ v2, changed2 := fastpathTV.DecMapInt32Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int64:
+ fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int64:
+ v2, changed2 := fastpathTV.DecMapInt32Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]float32:
+ fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]float32:
+ v2, changed2 := fastpathTV.DecMapInt32Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]float64:
+ fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]float64:
+ v2, changed2 := fastpathTV.DecMapInt32Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]bool:
+ fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]bool:
+ v2, changed2 := fastpathTV.DecMapInt32BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int64:
+ fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, d)
+ case *[]int64:
+ v2, changed2 := fastpathTV.DecSliceInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]interface{}:
+ fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]interface{}:
+ v2, changed2 := fastpathTV.DecMapInt64IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]string:
+ fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]string:
+ v2, changed2 := fastpathTV.DecMapInt64StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint:
+ fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint:
+ v2, changed2 := fastpathTV.DecMapInt64UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint8:
+ fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint8:
+ v2, changed2 := fastpathTV.DecMapInt64Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint16:
+ fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint16:
+ v2, changed2 := fastpathTV.DecMapInt64Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint32:
+ fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint32:
+ v2, changed2 := fastpathTV.DecMapInt64Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint64:
+ fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint64:
+ v2, changed2 := fastpathTV.DecMapInt64Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uintptr:
+ fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uintptr:
+ v2, changed2 := fastpathTV.DecMapInt64UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int:
+ fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int:
+ v2, changed2 := fastpathTV.DecMapInt64IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int8:
+ fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int8:
+ v2, changed2 := fastpathTV.DecMapInt64Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int16:
+ fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int16:
+ v2, changed2 := fastpathTV.DecMapInt64Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int32:
+ fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int32:
+ v2, changed2 := fastpathTV.DecMapInt64Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int64:
+ fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int64:
+ v2, changed2 := fastpathTV.DecMapInt64Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]float32:
+ fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]float32:
+ v2, changed2 := fastpathTV.DecMapInt64Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]float64:
+ fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]float64:
+ v2, changed2 := fastpathTV.DecMapInt64Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]bool:
+ fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]bool:
+ v2, changed2 := fastpathTV.DecMapInt64BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []bool:
+ fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, d)
+ case *[]bool:
+ v2, changed2 := fastpathTV.DecSliceBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]interface{}:
+ fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]interface{}:
+ v2, changed2 := fastpathTV.DecMapBoolIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]string:
+ fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]string:
+ v2, changed2 := fastpathTV.DecMapBoolStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint:
+ fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint:
+ v2, changed2 := fastpathTV.DecMapBoolUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint8:
+ fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint8:
+ v2, changed2 := fastpathTV.DecMapBoolUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint16:
+ fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint16:
+ v2, changed2 := fastpathTV.DecMapBoolUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint32:
+ fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint32:
+ v2, changed2 := fastpathTV.DecMapBoolUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint64:
+ fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint64:
+ v2, changed2 := fastpathTV.DecMapBoolUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uintptr:
+ fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uintptr:
+ v2, changed2 := fastpathTV.DecMapBoolUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int:
+ fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int:
+ v2, changed2 := fastpathTV.DecMapBoolIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int8:
+ fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int8:
+ v2, changed2 := fastpathTV.DecMapBoolInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int16:
+ fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int16:
+ v2, changed2 := fastpathTV.DecMapBoolInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int32:
+ fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int32:
+ v2, changed2 := fastpathTV.DecMapBoolInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int64:
+ fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int64:
+ v2, changed2 := fastpathTV.DecMapBoolInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]float32:
+ fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]float32:
+ v2, changed2 := fastpathTV.DecMapBoolFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]float64:
+ fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]float64:
+ v2, changed2 := fastpathTV.DecMapBoolFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]bool:
+ fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]bool:
+ v2, changed2 := fastpathTV.DecMapBoolBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
+// -- -- fast path functions
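+//
+// Each fast-path type is covered by three function shapes:
+//
+//   - fastpathDecXxxR(rv reflect.Value): reflection entry point. For slices it
+//     decodes through the addressable pointer (canChange=true) unless the
+//     destination is a fixed-size array or is not addressable, in which case it
+//     decodes into the value read-only (canChange=false). The map R functions do
+//     the same, keyed only on addressability.
+//   - DecXxxX(vp *T, checkNil bool, d *Decoder): pointer convenience wrapper;
+//     it calls DecXxxV and assigns *vp only when the returned changed flag is set.
+//   - DecXxxV(v T, checkNil, canChange bool, d *Decoder) (T, bool): performs the
+//     decode and returns the (possibly reallocated) value plus a changed flag.
+//
+// The decode type switch above returns true when one of these fast paths handled
+// the value, and false so the caller can fall back to the generic
+// reflection-based decode path.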
+
+func (f *decFnInfo) fastpathDecSliceIntfR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]interface{})
+ v, changed := fastpathTV.DecSliceIntfV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]interface{})
+ fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceIntfX(vp *[]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, d *Decoder) (_ []interface{}, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []interface{}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]interface{}, xlen)
+ }
+ } else {
+ v = make([]interface{}, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ d.decode(&v[j])
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, nil)
+ slh.ElemContainerState(j)
+ d.decode(&v[j])
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []interface{}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]interface{}, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, nil)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ d.decode(&v[j])
+
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
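+
+// The slice V functions above and below share one strategy: for a known
+// container length they reslice or reallocate (sized via decInferLen) and then
+// decode each element in place, swallowing any trailing elements that a
+// non-growable destination cannot hold; for indefinite-length, break-terminated
+// streams they append one element per iteration and finally truncate to the
+// count actually read. Callers use the returned (value, changed) pair as in:
+//
+//	if v2, changed := fastpathTV.DecSliceIntfV(*vp, fastpathCheckNilFalse, true, d); changed {
+//		*vp = v2
+//	}
+//
+// which is exactly what DecSliceIntfX and the decode type switch do.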
+
+func (f *decFnInfo) fastpathDecSliceStringR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]string)
+ v, changed := fastpathTV.DecSliceStringV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]string)
+ fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceStringX(vp *[]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d *Decoder) (_ []string, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []string{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]string, xlen)
+ }
+ } else {
+ v = make([]string, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeString()
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, "")
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeString()
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []string{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]string, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, "")
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeString()
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceFloat32R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]float32)
+ v, changed := fastpathTV.DecSliceFloat32V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]float32)
+ fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceFloat32X(vp *[]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, d *Decoder) (_ []float32, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []float32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]float32, xlen)
+ }
+ } else {
+ v = make([]float32, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = float32(dd.DecodeFloat(true))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = float32(dd.DecodeFloat(true))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []float32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]float32, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = float32(dd.DecodeFloat(true))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceFloat64R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]float64)
+ v, changed := fastpathTV.DecSliceFloat64V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]float64)
+ fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceFloat64X(vp *[]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, d *Decoder) (_ []float64, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []float64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]float64, xlen)
+ }
+ } else {
+ v = make([]float64, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeFloat(false)
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeFloat(false)
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []float64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]float64, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeFloat(false)
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUintR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uint)
+ v, changed := fastpathTV.DecSliceUintV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uint)
+ fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUintX(vp *[]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Decoder) (_ []uint, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint, xlen)
+ }
+ } else {
+ v = make([]uint, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = uint(dd.DecodeUint(uintBitsize))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = uint(dd.DecodeUint(uintBitsize))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uint, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = uint(dd.DecodeUint(uintBitsize))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUint16R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uint16)
+ v, changed := fastpathTV.DecSliceUint16V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uint16)
+ fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUint16X(vp *[]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d *Decoder) (_ []uint16, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint16, xlen)
+ }
+ } else {
+ v = make([]uint16, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = uint16(dd.DecodeUint(16))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = uint16(dd.DecodeUint(16))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uint16, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = uint16(dd.DecodeUint(16))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUint32R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uint32)
+ v, changed := fastpathTV.DecSliceUint32V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uint32)
+ fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUint32X(vp *[]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d *Decoder) (_ []uint32, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint32, xlen)
+ }
+ } else {
+ v = make([]uint32, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = uint32(dd.DecodeUint(32))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = uint32(dd.DecodeUint(32))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uint32, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = uint32(dd.DecodeUint(32))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUint64R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uint64)
+ v, changed := fastpathTV.DecSliceUint64V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uint64)
+ fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUint64X(vp *[]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d *Decoder) (_ []uint64, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint64, xlen)
+ }
+ } else {
+ v = make([]uint64, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeUint(64)
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeUint(64)
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uint64, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeUint(64)
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUintptrR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uintptr)
+ v, changed := fastpathTV.DecSliceUintptrV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uintptr)
+ fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, d *Decoder) (_ []uintptr, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uintptr{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uintptr, xlen)
+ }
+ } else {
+ v = make([]uintptr, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = uintptr(dd.DecodeUint(uintBitsize))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = uintptr(dd.DecodeUint(uintBitsize))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uintptr{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uintptr, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = uintptr(dd.DecodeUint(uintBitsize))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceIntR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int)
+ v, changed := fastpathTV.DecSliceIntV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int)
+ fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceIntX(vp *[]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decoder) (_ []int, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int, xlen)
+ }
+ } else {
+ v = make([]int, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = int(dd.DecodeInt(intBitsize))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = int(dd.DecodeInt(intBitsize))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = int(dd.DecodeInt(intBitsize))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceInt8R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int8)
+ v, changed := fastpathTV.DecSliceInt8V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int8)
+ fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceInt8X(vp *[]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Decoder) (_ []int8, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int8{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int8, xlen)
+ }
+ } else {
+ v = make([]int8, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = int8(dd.DecodeInt(8))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = int8(dd.DecodeInt(8))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int8{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int8, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = int8(dd.DecodeInt(8))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceInt16R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int16)
+ v, changed := fastpathTV.DecSliceInt16V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int16)
+ fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceInt16X(vp *[]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *Decoder) (_ []int16, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int16, xlen)
+ }
+ } else {
+ v = make([]int16, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = int16(dd.DecodeInt(16))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = int16(dd.DecodeInt(16))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int16, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = int16(dd.DecodeInt(16))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceInt32R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int32)
+ v, changed := fastpathTV.DecSliceInt32V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int32)
+ fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceInt32X(vp *[]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *Decoder) (_ []int32, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int32, xlen)
+ }
+ } else {
+ v = make([]int32, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = int32(dd.DecodeInt(32))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = int32(dd.DecodeInt(32))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int32, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = int32(dd.DecodeInt(32))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceInt64R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int64)
+ v, changed := fastpathTV.DecSliceInt64V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int64)
+ fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceInt64X(vp *[]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *Decoder) (_ []int64, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int64, xlen)
+ }
+ } else {
+ v = make([]int64, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeInt(64)
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeInt(64)
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int64, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeInt(64)
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceBoolR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]bool)
+ v, changed := fastpathTV.DecSliceBoolV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]bool)
+ fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceBoolX(vp *[]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Decoder) (_ []bool, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []bool{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]bool, xlen)
+ }
+ } else {
+ v = make([]bool, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeBool()
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, false)
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeBool()
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []bool{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]bool, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, false)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeBool()
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
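+// The map fast paths below all share one shape: ReadMapStart yields either a
+// known length (containerLen > 0, consumed with a counted loop) or a
+// stream-terminated map (containerLen < 0, consumed until CheckBreak), and
+// containerMapKey/containerMapValue/containerMapEnd events are emitted via
+// cr.sendContainerState whenever cr is non-nil. Keys decoded as []byte are
+// interned to strings with d.string before being used as map keys.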
+func (f *decFnInfo) fastpathDecMapIntfIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]interface{})
+ v, changed := fastpathTV.DecMapIntfIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]interface{})
+ fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[interface{}]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk interface{}
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]string)
+ v, changed := fastpathTV.DecMapIntfStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]string)
+ fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[interface{}]string, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint)
+ v, changed := fastpathTV.DecMapIntfUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint)
+ fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]uint, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint8)
+ v, changed := fastpathTV.DecMapIntfUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint8)
+ fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[interface{}]uint8, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint16)
+ v, changed := fastpathTV.DecMapIntfUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint16)
+ fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[interface{}]uint16, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint32)
+ v, changed := fastpathTV.DecMapIntfUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint32)
+ fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[interface{}]uint32, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint64)
+ v, changed := fastpathTV.DecMapIntfUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint64)
+ fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]uint64, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uintptr)
+ v, changed := fastpathTV.DecMapIntfUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uintptr)
+ fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]uintptr, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int)
+ v, changed := fastpathTV.DecMapIntfIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int)
+ fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]int, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int8)
+ v, changed := fastpathTV.DecMapIntfInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int8)
+ fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[interface{}]int8, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int16)
+ v, changed := fastpathTV.DecMapIntfInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int16)
+ fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[interface{}]int16, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int32)
+ v, changed := fastpathTV.DecMapIntfInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int32)
+ fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[interface{}]int32, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int64)
+ v, changed := fastpathTV.DecMapIntfInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int64)
+ fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]int64, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]float32)
+ v, changed := fastpathTV.DecMapIntfFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]float32)
+ fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[interface{}]float32, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]float64)
+ v, changed := fastpathTV.DecMapIntfFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]float64)
+ fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]float64, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]bool)
+ v, changed := fastpathTV.DecMapIntfBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]bool)
+ fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[interface{}]bool, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
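+// String-keyed map fast paths differ from the interface{}-keyed ones above
+// only in how the key is read: mk comes straight from dd.DecodeString()
+// instead of going through d.decode(&mk) plus []byte-to-string interning.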
+func (f *decFnInfo) fastpathDecMapStringIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]interface{})
+ v, changed := fastpathTV.DecMapStringIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]interface{})
+ fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[string]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk string
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]string)
+ v, changed := fastpathTV.DecMapStringStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]string)
+ fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringStringX(vp *map[string]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringStringV(v map[string]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[string]string, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint)
+ v, changed := fastpathTV.DecMapStringUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint)
+ fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUintX(vp *map[string]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUintV(v map[string]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]uint, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint8)
+ v, changed := fastpathTV.DecMapStringUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint8)
+ fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[string]uint8, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint16)
+ v, changed := fastpathTV.DecMapStringUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint16)
+ fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[string]uint16, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint32)
+ v, changed := fastpathTV.DecMapStringUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint32)
+ fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[string]uint32, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint64)
+ v, changed := fastpathTV.DecMapStringUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint64)
+ fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]uint64, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uintptr)
+ v, changed := fastpathTV.DecMapStringUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uintptr)
+ fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]uintptr, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int)
+ v, changed := fastpathTV.DecMapStringIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int)
+ fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringIntX(vp *map[string]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringIntV(v map[string]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]int, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int8)
+ v, changed := fastpathTV.DecMapStringInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int8)
+ fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt8V(v map[string]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[string]int8, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int16)
+ v, changed := fastpathTV.DecMapStringInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int16)
+ fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt16V(v map[string]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[string]int16, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int32)
+ v, changed := fastpathTV.DecMapStringInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int32)
+ fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt32V(v map[string]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[string]int32, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int64)
+ v, changed := fastpathTV.DecMapStringInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int64)
+ fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt64V(v map[string]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]int64, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]float32)
+ v, changed := fastpathTV.DecMapStringFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]float32)
+ fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[string]float32, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]float64)
+ v, changed := fastpathTV.DecMapStringFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]float64)
+ fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]float64, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]bool)
+ v, changed := fastpathTV.DecMapStringBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]bool)
+ fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringBoolV(v map[string]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[string]bool, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
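The constant passed as the last argument to decInferLen in these functions appears to be the estimated byte footprint of one key/value pair (key size plus value size, with a string counted as its 16-byte header), which lets the decoder bound the initial map allocation against MaxInitLen when the container length is unknown or very large. A small sketch that reproduces the literals used above, assuming a 64-bit platform:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var (
		s   string
		b   bool
		f32 float32
		i64 int64
		u   uint
	)
	// key size + value size, matching the literals passed to decInferLen above
	fmt.Println(unsafe.Sizeof(s) + unsafe.Sizeof(i64)) // 24 -> map[string]int64
	fmt.Println(unsafe.Sizeof(s) + unsafe.Sizeof(f32)) // 20 -> map[string]float32
	fmt.Println(unsafe.Sizeof(s) + unsafe.Sizeof(b))   // 17 -> map[string]bool
	fmt.Println(unsafe.Sizeof(f32) + unsafe.Sizeof(u)) // 12 -> map[float32]uint
	fmt.Println(unsafe.Sizeof(f32) + unsafe.Sizeof(b)) //  5 -> map[float32]bool
}
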
+func (f *decFnInfo) fastpathDecMapFloat32IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]interface{})
+ v, changed := fastpathTV.DecMapFloat32IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]interface{})
+ fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[float32]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk float32
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
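For interface{}-valued maps such as the one just above, the generated loop consults mapGet, derived from the handle's MapValueReset and InterfaceReset options, to decide whether to decode into the value already stored under the key or to start from nil. A hedged sketch of toggling those options through the public API (MsgpackHandle and the sample input byte are illustrative assumptions):

package main

import "github.com/ugorji/go/codec" // assumed upstream path of this vendored package

func main() {
	var h codec.MsgpackHandle

	// mapGet in the generated loop is !MapValueReset && !InterfaceReset:
	// when both options are left false, the decoder does mv = v[mk] and
	// decodes into the value already present, reusing nested containers.
	// Setting either option makes every value start from nil instead.
	h.MapValueReset = true
	h.InterfaceReset = true

	var dst map[float64]interface{}
	// 0x80 is the msgpack encoding of an empty map (illustrative input only).
	if err := codec.NewDecoderBytes([]byte{0x80}, &h).Decode(&dst); err != nil {
		panic(err)
	}
	_ = dst
}
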
+func (f *decFnInfo) fastpathDecMapFloat32StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]string)
+ v, changed := fastpathTV.DecMapFloat32StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]string)
+ fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[float32]string, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint)
+ v, changed := fastpathTV.DecMapFloat32UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint)
+ fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]uint, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint8)
+ v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint8)
+ fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[float32]uint8, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint16)
+ v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint16)
+ fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[float32]uint16, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint32)
+ v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint32)
+ fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[float32]uint32, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint64)
+ v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint64)
+ fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]uint64, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uintptr)
+ v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uintptr)
+ fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]uintptr, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int)
+ v, changed := fastpathTV.DecMapFloat32IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int)
+ fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]int, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int8)
+ v, changed := fastpathTV.DecMapFloat32Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int8)
+ fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[float32]int8, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int16)
+ v, changed := fastpathTV.DecMapFloat32Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int16)
+ fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[float32]int16, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int32)
+ v, changed := fastpathTV.DecMapFloat32Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int32)
+ fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[float32]int32, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int64)
+ v, changed := fastpathTV.DecMapFloat32Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int64)
+ fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]int64, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]float32)
+ v, changed := fastpathTV.DecMapFloat32Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]float32)
+ fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[float32]float32, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]float64)
+ v, changed := fastpathTV.DecMapFloat32Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]float64)
+ fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]float64, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]bool)
+ v, changed := fastpathTV.DecMapFloat32BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]bool)
+ fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[float32]bool, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]interface{})
+ v, changed := fastpathTV.DecMapFloat64IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]interface{})
+ fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[float64]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk float64
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]string)
+ v, changed := fastpathTV.DecMapFloat64StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]string)
+ fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[float64]string, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint)
+ v, changed := fastpathTV.DecMapFloat64UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint)
+ fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]uint, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint8)
+ v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint8)
+ fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[float64]uint8, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint16)
+ v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint16)
+ fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[float64]uint16, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint32)
+ v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint32)
+ fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float64]uint32, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint64)
+ v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint64)
+ fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]uint64, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uintptr)
+ v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uintptr)
+ fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]uintptr, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int)
+ v, changed := fastpathTV.DecMapFloat64IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int)
+ fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]int, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int8)
+ v, changed := fastpathTV.DecMapFloat64Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int8)
+ fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[float64]int8, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int16)
+ v, changed := fastpathTV.DecMapFloat64Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int16)
+ fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[float64]int16, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int32)
+ v, changed := fastpathTV.DecMapFloat64Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int32)
+ fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float64]int32, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int64)
+ v, changed := fastpathTV.DecMapFloat64Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int64)
+ fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]int64, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]float32)
+ v, changed := fastpathTV.DecMapFloat64Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]float32)
+ fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float64]float32, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]float64)
+ v, changed := fastpathTV.DecMapFloat64Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]float64)
+ fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]float64, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]bool)
+ v, changed := fastpathTV.DecMapFloat64BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]bool)
+ fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[float64]bool, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
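+// For the map[...]interface{} fast paths, mapGet (driven by the handle's
+// MapValueReset and InterfaceReset options) decides whether each value is
+// decoded into the existing entry v[mk], allowing in-place reuse, or into a
+// fresh nil interface before being stored back into the map.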
+func (f *decFnInfo) fastpathDecMapUintIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]interface{})
+ v, changed := fastpathTV.DecMapUintIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]interface{})
+ fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]string)
+ v, changed := fastpathTV.DecMapUintStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]string)
+ fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintStringX(vp *map[uint]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintStringV(v map[uint]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint]string, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint)
+ v, changed := fastpathTV.DecMapUintUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint)
+ fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUintV(v map[uint]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]uint, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint8)
+ v, changed := fastpathTV.DecMapUintUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint8)
+ fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint16)
+ v, changed := fastpathTV.DecMapUintUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint16)
+ fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint32)
+ v, changed := fastpathTV.DecMapUintUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint32)
+ fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint64)
+ v, changed := fastpathTV.DecMapUintUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint64)
+ fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uintptr)
+ v, changed := fastpathTV.DecMapUintUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uintptr)
+ fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int)
+ v, changed := fastpathTV.DecMapUintIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int)
+ fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintIntX(vp *map[uint]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintIntV(v map[uint]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]int, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int8)
+ v, changed := fastpathTV.DecMapUintInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int8)
+ fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint]int8, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int16)
+ v, changed := fastpathTV.DecMapUintInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int16)
+ fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint]int16, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int32)
+ v, changed := fastpathTV.DecMapUintInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int32)
+ fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint]int32, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int64)
+ v, changed := fastpathTV.DecMapUintInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int64)
+ fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]int64, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]float32)
+ v, changed := fastpathTV.DecMapUintFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]float32)
+ fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint]float32, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]float64)
+ v, changed := fastpathTV.DecMapUintFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]float64)
+ fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]float64, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]bool)
+ v, changed := fastpathTV.DecMapUintBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]bool)
+ fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint]bool, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]interface{})
+ v, changed := fastpathTV.DecMapUint8IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]interface{})
+ fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[uint8]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint8
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]string)
+ v, changed := fastpathTV.DecMapUint8StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]string)
+ fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[uint8]string, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint)
+ v, changed := fastpathTV.DecMapUint8UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint)
+ fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]uint, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint8)
+ v, changed := fastpathTV.DecMapUint8Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint8)
+ fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[uint8]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint16)
+ v, changed := fastpathTV.DecMapUint8Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint16)
+ fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint8]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint32)
+ v, changed := fastpathTV.DecMapUint8Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint32)
+ fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint8]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint64)
+ v, changed := fastpathTV.DecMapUint8Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint64)
+ fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uintptr)
+ v, changed := fastpathTV.DecMapUint8UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uintptr)
+ fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int)
+ v, changed := fastpathTV.DecMapUint8IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int)
+ fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]int, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int8)
+ v, changed := fastpathTV.DecMapUint8Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int8)
+ fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[uint8]int8, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int16)
+ v, changed := fastpathTV.DecMapUint8Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int16)
+ fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint8]int16, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int32)
+ v, changed := fastpathTV.DecMapUint8Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int32)
+ fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint8]int32, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int64)
+ v, changed := fastpathTV.DecMapUint8Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int64)
+ fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]int64, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]float32)
+ v, changed := fastpathTV.DecMapUint8Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]float32)
+ fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint8]float32, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]float64)
+ v, changed := fastpathTV.DecMapUint8Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]float64)
+ fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]float64, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]bool)
+ v, changed := fastpathTV.DecMapUint8BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]bool)
+ fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[uint8]bool, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
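+// The interface{}-valued fast paths differ from the scalar ones in how values are produced:
+// unless the handle's MapValueReset or InterfaceReset option is set, the current value for
+// the key (if any) is fetched first and decoded into in place via d.decode(&mv), so
+// previously decoded nested values can be updated rather than replaced.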
+func (f *decFnInfo) fastpathDecMapUint16IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]interface{})
+ v, changed := fastpathTV.DecMapUint16IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]interface{})
+ fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[uint16]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint16
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]string)
+ v, changed := fastpathTV.DecMapUint16StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]string)
+ fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[uint16]string, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
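+// uint and uintptr values are decoded with dd.DecodeUint(uintBitsize), where uintBitsize is
+// the platform word size in bits, so range checking matches the target architecture.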
+func (f *decFnInfo) fastpathDecMapUint16UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint)
+ v, changed := fastpathTV.DecMapUint16UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint)
+ fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]uint, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint8)
+ v, changed := fastpathTV.DecMapUint16Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint8)
+ fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint16]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint16)
+ v, changed := fastpathTV.DecMapUint16Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint16)
+ fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[uint16]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint32)
+ v, changed := fastpathTV.DecMapUint16Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint32)
+ fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint16]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint64)
+ v, changed := fastpathTV.DecMapUint16Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint64)
+ fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uintptr)
+ v, changed := fastpathTV.DecMapUint16UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uintptr)
+ fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int)
+ v, changed := fastpathTV.DecMapUint16IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int)
+ fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]int, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int8)
+ v, changed := fastpathTV.DecMapUint16Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int8)
+ fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint16]int8, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int16)
+ v, changed := fastpathTV.DecMapUint16Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int16)
+ fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[uint16]int16, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int32)
+ v, changed := fastpathTV.DecMapUint16Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int32)
+ fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint16]int32, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int64)
+ v, changed := fastpathTV.DecMapUint16Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int64)
+ fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]int64, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
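+// float32 values are decoded as float32(dd.DecodeFloat(true)); the true argument asks the
+// driver to verify the decoded number fits in a float32 before the narrowing conversion,
+// while the float64 variants pass false and use the value as-is.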
+func (f *decFnInfo) fastpathDecMapUint16Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]float32)
+ v, changed := fastpathTV.DecMapUint16Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]float32)
+ fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint16]float32, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]float64)
+ v, changed := fastpathTV.DecMapUint16Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]float64)
+ fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]float64, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]bool)
+ v, changed := fastpathTV.DecMapUint16BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]bool)
+ fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint16]bool, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]interface{})
+ v, changed := fastpathTV.DecMapUint32IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]interface{})
+ fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[uint32]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint32
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]string)
+ v, changed := fastpathTV.DecMapUint32StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]string)
+ fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[uint32]string, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint)
+ v, changed := fastpathTV.DecMapUint32UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint)
+ fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]uint, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint8)
+ v, changed := fastpathTV.DecMapUint32Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint8)
+ fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint32]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint16)
+ v, changed := fastpathTV.DecMapUint32Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint16)
+ fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint32]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint32)
+ v, changed := fastpathTV.DecMapUint32Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint32)
+ fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[uint32]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint64)
+ v, changed := fastpathTV.DecMapUint32Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint64)
+ fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uintptr)
+ v, changed := fastpathTV.DecMapUint32UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uintptr)
+ fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int)
+ v, changed := fastpathTV.DecMapUint32IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int)
+ fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]int, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int8)
+ v, changed := fastpathTV.DecMapUint32Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int8)
+ fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint32]int8, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int16)
+ v, changed := fastpathTV.DecMapUint32Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int16)
+ fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint32]int16, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int32)
+ v, changed := fastpathTV.DecMapUint32Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int32)
+ fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[uint32]int32, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int64)
+ v, changed := fastpathTV.DecMapUint32Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int64)
+ fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]int64, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]float32)
+ v, changed := fastpathTV.DecMapUint32Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]float32)
+ fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[uint32]float32, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]float64)
+ v, changed := fastpathTV.DecMapUint32Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]float64)
+ fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]float64, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]bool)
+ v, changed := fastpathTV.DecMapUint32BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]bool)
+ fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint32]bool, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
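+// uint64-keyed fast paths read the key with dd.DecodeUint(64) and use it directly; the
+// narrower key types above convert the result, e.g. uint16(dd.DecodeUint(16)).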
+func (f *decFnInfo) fastpathDecMapUint64IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]interface{})
+ v, changed := fastpathTV.DecMapUint64IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]interface{})
+ fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint64]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint64
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]string)
+ v, changed := fastpathTV.DecMapUint64StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]string)
+ fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint64]string, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint)
+ v, changed := fastpathTV.DecMapUint64UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint)
+ fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]uint, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint8)
+ v, changed := fastpathTV.DecMapUint64Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint8)
+ fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint64]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint16)
+ v, changed := fastpathTV.DecMapUint64Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint16)
+ fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint64]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint32)
+ v, changed := fastpathTV.DecMapUint64Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint32)
+ fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint64]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint64)
+ v, changed := fastpathTV.DecMapUint64Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint64)
+ fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uintptr)
+ v, changed := fastpathTV.DecMapUint64UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uintptr)
+ fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int)
+ v, changed := fastpathTV.DecMapUint64IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int)
+ fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]int, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int8)
+ v, changed := fastpathTV.DecMapUint64Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int8)
+ fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint64]int8, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int16)
+ v, changed := fastpathTV.DecMapUint64Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int16)
+ fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint64]int16, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int32)
+ v, changed := fastpathTV.DecMapUint64Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int32)
+ fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint64]int32, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int64)
+ v, changed := fastpathTV.DecMapUint64Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int64)
+ fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]int64, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]float32)
+ v, changed := fastpathTV.DecMapUint64Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]float32)
+ fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint64]float32, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]float64)
+ v, changed := fastpathTV.DecMapUint64Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]float64)
+ fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]float64, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]bool)
+ v, changed := fastpathTV.DecMapUint64BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]bool)
+ fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint64]bool, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
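+// The key and value are always consumed from the stream, even when the
+// destination map is nil and cannot be grown (canChange false); the "if v != nil"
+// guard only skips the store, which keeps the decoder correctly positioned in
+// the input.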
+func (f *decFnInfo) fastpathDecMapUintptrIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]interface{})
+ v, changed := fastpathTV.DecMapUintptrIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]interface{})
+ fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uintptr]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uintptr
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]string)
+ v, changed := fastpathTV.DecMapUintptrStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]string)
+ fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uintptr]string, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint)
+ v, changed := fastpathTV.DecMapUintptrUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint)
+ fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]uint, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint8)
+ v, changed := fastpathTV.DecMapUintptrUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint8)
+ fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uintptr]uint8, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint16)
+ v, changed := fastpathTV.DecMapUintptrUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint16)
+ fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uintptr]uint16, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint32)
+ v, changed := fastpathTV.DecMapUintptrUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint32)
+ fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uintptr]uint32, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint64)
+ v, changed := fastpathTV.DecMapUintptrUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint64)
+ fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]uint64, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uintptr)
+ v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uintptr)
+ fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int)
+ v, changed := fastpathTV.DecMapUintptrIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int)
+ fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]int, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int8)
+ v, changed := fastpathTV.DecMapUintptrInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int8)
+ fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uintptr]int8, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int16)
+ v, changed := fastpathTV.DecMapUintptrInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int16)
+ fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uintptr]int16, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int32)
+ v, changed := fastpathTV.DecMapUintptrInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int32)
+ fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uintptr]int32, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int64)
+ v, changed := fastpathTV.DecMapUintptrInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int64)
+ fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]int64, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]float32)
+ v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]float32)
+ fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uintptr]float32, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]float64)
+ v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]float64)
+ fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]float64, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]bool)
+ v, changed := fastpathTV.DecMapUintptrBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]bool)
+ fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uintptr]bool, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
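+// The signed-integer-keyed variants below mirror the unsigned ones above: keys
+// are read with DecodeInt(intBitsize) and narrowed by conversion, and the literal
+// passed to decInferLen is the approximate byte size of one key/value pair, used
+// to bound the initial map allocation against MaxInitLen.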
+func (f *decFnInfo) fastpathDecMapIntIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]interface{})
+ v, changed := fastpathTV.DecMapIntIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]interface{})
+ fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]string)
+ v, changed := fastpathTV.DecMapIntStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]string)
+ fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntStringX(vp *map[int]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntStringV(v map[int]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int]string, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint)
+ v, changed := fastpathTV.DecMapIntUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint)
+ fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUintX(vp *map[int]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUintV(v map[int]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]uint, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint8)
+ v, changed := fastpathTV.DecMapIntUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint8)
+ fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int]uint8, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint16)
+ v, changed := fastpathTV.DecMapIntUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint16)
+ fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int]uint16, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint32)
+ v, changed := fastpathTV.DecMapIntUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint32)
+ fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int]uint32, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint64)
+ v, changed := fastpathTV.DecMapIntUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint64)
+ fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]uint64, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uintptr)
+ v, changed := fastpathTV.DecMapIntUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uintptr)
+ fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int)
+ v, changed := fastpathTV.DecMapIntIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int)
+ fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntIntX(vp *map[int]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntIntV(v map[int]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]int, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int8)
+ v, changed := fastpathTV.DecMapIntInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int8)
+ fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt8V(v map[int]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int]int8, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int16)
+ v, changed := fastpathTV.DecMapIntInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int16)
+ fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt16V(v map[int]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int]int16, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int32)
+ v, changed := fastpathTV.DecMapIntInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int32)
+ fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt32V(v map[int]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int]int32, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int64)
+ v, changed := fastpathTV.DecMapIntInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int64)
+ fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt64V(v map[int]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]int64, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]float32)
+ v, changed := fastpathTV.DecMapIntFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]float32)
+ fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int]float32, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]float64)
+ v, changed := fastpathTV.DecMapIntFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]float64)
+ fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]float64, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]bool)
+ v, changed := fastpathTV.DecMapIntBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]bool)
+ fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntBoolV(v map[int]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int]bool, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]interface{})
+ v, changed := fastpathTV.DecMapInt8IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]interface{})
+ fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[int8]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int8
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]string)
+ v, changed := fastpathTV.DecMapInt8StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]string)
+ fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8StringV(v map[int8]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[int8]string, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint)
+ v, changed := fastpathTV.DecMapInt8UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint)
+ fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]uint, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint8)
+ v, changed := fastpathTV.DecMapInt8Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint8)
+ fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[int8]uint8, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint16)
+ v, changed := fastpathTV.DecMapInt8Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint16)
+ fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int8]uint16, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint32)
+ v, changed := fastpathTV.DecMapInt8Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint32)
+ fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int8]uint32, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint64)
+ v, changed := fastpathTV.DecMapInt8Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint64)
+ fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]uint64, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uintptr)
+ v, changed := fastpathTV.DecMapInt8UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uintptr)
+ fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int)
+ v, changed := fastpathTV.DecMapInt8IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int)
+ fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8IntV(v map[int8]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]int, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int8)
+ v, changed := fastpathTV.DecMapInt8Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int8)
+ fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[int8]int8, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int16)
+ v, changed := fastpathTV.DecMapInt8Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int16)
+ fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int8]int16, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int32)
+ v, changed := fastpathTV.DecMapInt8Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int32)
+ fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int8]int32, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int64)
+ v, changed := fastpathTV.DecMapInt8Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int64)
+ fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]int64, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]float32)
+ v, changed := fastpathTV.DecMapInt8Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]float32)
+ fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int8]float32, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]float64)
+ v, changed := fastpathTV.DecMapInt8Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]float64)
+ fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]float64, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]bool)
+ v, changed := fastpathTV.DecMapInt8BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]bool)
+ fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[int8]bool, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]interface{})
+ v, changed := fastpathTV.DecMapInt16IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]interface{})
+ fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[int16]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int16
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]string)
+ v, changed := fastpathTV.DecMapInt16StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]string)
+ fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16StringV(v map[int16]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[int16]string, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint)
+ v, changed := fastpathTV.DecMapInt16UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint)
+ fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]uint, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint8)
+ v, changed := fastpathTV.DecMapInt16Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint8)
+ fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int16]uint8, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint16)
+ v, changed := fastpathTV.DecMapInt16Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint16)
+ fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[int16]uint16, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint32)
+ v, changed := fastpathTV.DecMapInt16Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint32)
+ fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int16]uint32, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint64)
+ v, changed := fastpathTV.DecMapInt16Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint64)
+ fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]uint64, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uintptr)
+ v, changed := fastpathTV.DecMapInt16UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uintptr)
+ fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int)
+ v, changed := fastpathTV.DecMapInt16IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int)
+ fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16IntV(v map[int16]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]int, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int8)
+ v, changed := fastpathTV.DecMapInt16Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int8)
+ fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int16]int8, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int16)
+ v, changed := fastpathTV.DecMapInt16Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int16)
+ fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[int16]int16, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int32)
+ v, changed := fastpathTV.DecMapInt16Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int32)
+ fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int16]int32, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int64)
+ v, changed := fastpathTV.DecMapInt16Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int64)
+ fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]int64, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]float32)
+ v, changed := fastpathTV.DecMapInt16Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]float32)
+ fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int16]float32, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]float64)
+ v, changed := fastpathTV.DecMapInt16Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]float64)
+ fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]float64, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]bool)
+ v, changed := fastpathTV.DecMapInt16BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]bool)
+ fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int16]bool, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]interface{})
+ v, changed := fastpathTV.DecMapInt32IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]interface{})
+ fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[int32]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int32
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]string)
+ v, changed := fastpathTV.DecMapInt32StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]string)
+ fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32StringV(v map[int32]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[int32]string, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint)
+ v, changed := fastpathTV.DecMapInt32UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint)
+ fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]uint, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint8)
+ v, changed := fastpathTV.DecMapInt32Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint8)
+ fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int32]uint8, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint16)
+ v, changed := fastpathTV.DecMapInt32Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint16)
+ fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int32]uint16, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint32)
+ v, changed := fastpathTV.DecMapInt32Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint32)
+ fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[int32]uint32, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint64)
+ v, changed := fastpathTV.DecMapInt32Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint64)
+ fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]uint64, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uintptr)
+ v, changed := fastpathTV.DecMapInt32UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uintptr)
+ fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int)
+ v, changed := fastpathTV.DecMapInt32IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int)
+ fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32IntV(v map[int32]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]int, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int8)
+ v, changed := fastpathTV.DecMapInt32Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int8)
+ fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int32]int8, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int16)
+ v, changed := fastpathTV.DecMapInt32Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int16)
+ fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int32]int16, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int32)
+ v, changed := fastpathTV.DecMapInt32Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int32)
+ fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[int32]int32, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int64)
+ v, changed := fastpathTV.DecMapInt32Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int64)
+ fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]int64, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]float32)
+ v, changed := fastpathTV.DecMapInt32Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]float32)
+ fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[int32]float32, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]float64)
+ v, changed := fastpathTV.DecMapInt32Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]float64)
+ fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]float64, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]bool)
+ v, changed := fastpathTV.DecMapInt32BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]bool)
+ fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int32]bool, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]interface{})
+ v, changed := fastpathTV.DecMapInt64IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]interface{})
+ fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int64]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int64
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]string)
+ v, changed := fastpathTV.DecMapInt64StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]string)
+ fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64StringV(v map[int64]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int64]string, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint)
+ v, changed := fastpathTV.DecMapInt64UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint)
+ fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]uint, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint8)
+ v, changed := fastpathTV.DecMapInt64Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint8)
+ fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int64]uint8, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint16)
+ v, changed := fastpathTV.DecMapInt64Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint16)
+ fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int64]uint16, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint32)
+ v, changed := fastpathTV.DecMapInt64Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint32)
+ fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int64]uint32, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint64)
+ v, changed := fastpathTV.DecMapInt64Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint64)
+ fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]uint64, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uintptr)
+ v, changed := fastpathTV.DecMapInt64UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uintptr)
+ fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int)
+ v, changed := fastpathTV.DecMapInt64IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int)
+ fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64IntV(v map[int64]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]int, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int8)
+ v, changed := fastpathTV.DecMapInt64Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int8)
+ fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int64]int8, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int16)
+ v, changed := fastpathTV.DecMapInt64Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int16)
+ fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int64]int16, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int32)
+ v, changed := fastpathTV.DecMapInt64Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int32)
+ fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int64]int32, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int64)
+ v, changed := fastpathTV.DecMapInt64Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int64)
+ fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]int64, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]float32)
+ v, changed := fastpathTV.DecMapInt64Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]float32)
+ fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int64]float32, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]float64)
+ v, changed := fastpathTV.DecMapInt64Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]float64)
+ fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]float64, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]bool)
+ v, changed := fastpathTV.DecMapInt64BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]bool)
+ fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int64]bool, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]interface{})
+ v, changed := fastpathTV.DecMapBoolIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]interface{})
+ fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[bool]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk bool
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]string)
+ v, changed := fastpathTV.DecMapBoolStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]string)
+ fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolStringV(v map[bool]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[bool]string, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint)
+ v, changed := fastpathTV.DecMapBoolUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint)
+ fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]uint, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint8)
+ v, changed := fastpathTV.DecMapBoolUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint8)
+ fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[bool]uint8, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint16)
+ v, changed := fastpathTV.DecMapBoolUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint16)
+ fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[bool]uint16, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint32)
+ v, changed := fastpathTV.DecMapBoolUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint32)
+ fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[bool]uint32, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint64)
+ v, changed := fastpathTV.DecMapBoolUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint64)
+ fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]uint64, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uintptr)
+ v, changed := fastpathTV.DecMapBoolUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uintptr)
+ fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]uintptr, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int)
+ v, changed := fastpathTV.DecMapBoolIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int)
+ fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolIntV(v map[bool]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]int, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int8)
+ v, changed := fastpathTV.DecMapBoolInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int8)
+ fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[bool]int8, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int16)
+ v, changed := fastpathTV.DecMapBoolInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int16)
+ fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[bool]int16, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int32)
+ v, changed := fastpathTV.DecMapBoolInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int32)
+ fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[bool]int32, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int64)
+ v, changed := fastpathTV.DecMapBoolInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int64)
+ fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]int64, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]float32)
+ v, changed := fastpathTV.DecMapBoolFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]float32)
+ fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[bool]float32, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]float64)
+ v, changed := fastpathTV.DecMapBoolFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]float64)
+ fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]float64, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]bool)
+ v, changed := fastpathTV.DecMapBoolBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]bool)
+ fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[bool]bool, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl
new file mode 100644
index 0000000..04c173f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl
@@ -0,0 +1,540 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl
+// ************************************************************
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file
+// so as not to pollute encode.go and decode.go, and to avoid creating a dependency there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+// - Many calls bypass reflection altogether
+//
+// Currently supported:
+// - slice of all builtin types,
+// - map of all builtin types to string or interface value
+// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
+// This should provide adequate "typical" implementations.
+//
+// Note that fast track decode functions must handle values for which an address cannot be obtained.
+// For example:
+// m2 := map[string]int{}
+// p2 := []interface{}{m2}
+// // decoding into p2 will bomb if fast track functions do not treat it as unaddressable.
+//
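+// An illustrative sketch (not generated code; written from a client package, and with
+// MsgpackHandle as an arbitrary example Handle) of how these fast paths are reached
+// through the ordinary Encoder/Decoder API and the type-switch helpers defined below:
+//   var h codec.MsgpackHandle
+//   var b []byte
+//   m := map[int64]string{1: "one"}
+//   codec.NewEncoderBytes(&b, &h).MustEncode(m)   // expected to route to the generated EncMapInt64StringV
+//   var out map[int64]string
+//   codec.NewDecoderBytes(b, &h).MustDecode(&out) // expected to route to DecMapInt64StringV
+//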
+
+import (
+ "reflect"
+ "sort"
+)
+
+const fastpathCheckNilFalse = false // for reflect
+const fastpathCheckNilTrue = true // for type switch
+
+type fastpathT struct{}
+
+var fastpathTV fastpathT
+
+type fastpathE struct {
+ rtid uintptr
+ rt reflect.Type
+ encfn func(*encFnInfo, reflect.Value)
+ decfn func(*decFnInfo, reflect.Value)
+}
+
+type fastpathA [{{ .FastpathLen }}]fastpathE
+
+func (x *fastpathA) index(rtid uintptr) int {
+ // use binary search to grab the index (adapted from sort/search.go)
+ h, i, j := 0, 0, {{ .FastpathLen }} // len(x)
+ for i < j {
+ h = i + (j-i)/2
+ if x[h].rtid < rtid {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ if i < {{ .FastpathLen }} && x[i].rtid == rtid {
+ return i
+ }
+ return -1
+}
+
+type fastpathAslice []fastpathE
+
+func (x fastpathAslice) Len() int { return len(x) }
+func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid }
+func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+var fastpathAV fastpathA
+
+// due to a possible initialization loop error, the fastpath table is populated in an init()
+func init() {
+ if !fastpathEnabled {
+ return
+ }
+ i := 0
+ fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) {
+ xrt := reflect.TypeOf(v)
+ xptr := reflect.ValueOf(xrt).Pointer()
+ fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
+ i++
+ return
+ }
+
+ {{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+ fn([]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
+
+ {{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+ fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
+
+ sort.Sort(fastpathAslice(fastpathAV[:]))
+}
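+
+// Note: the table above is deliberately kept sorted by rtid; index() relies on that
+// ordering to binary-search fastpathAV when looking up the encode/decode functions
+// registered for a concrete type.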
+
+// -- encode
+
+// -- -- fast path type switch
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+ case []{{ .Elem }}:{{else}}
+ case map[{{ .MapKey }}]{{ .Elem }}:{{end}}
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e){{if not .MapKey }}
+ case *[]{{ .Elem }}:{{else}}
+ case *map[{{ .MapKey }}]{{ .Elem }}:{{end}}
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
+{{end}}{{end}}
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+ case []{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e)
+ case *[]{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
+{{end}}{{end}}{{end}}
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+ case map[{{ .MapKey }}]{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e)
+ case *map[{{ .MapKey }}]{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
+{{end}}{{end}}{{end}}
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
+// -- -- fast path functions
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+
+func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil { cr.sendContainerState(containerArrayElem) }
+ {{ encmd .Elem "v2"}}
+ }
+ if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}}
+}
+
+func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ {{ encmd .Elem "v2"}}
+ }
+ if cr != nil { cr.sendContainerState(containerMapEnd) }
+}
+
+{{end}}{{end}}{{end}}
+
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+
+func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) {
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ {{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ {{end}}if e.h.Canonical {
+ {{if eq .MapKey "interface{}"}}{{/* out of band
+ */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI {{/* put loop variables outside. seems currently needed for better perf */}}
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
+ e.asis(v2[j].v)
+ if cr != nil { cr.sendContainerState(containerMapValue) }
+ e.encode(v[v2[j].i])
+ } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = {{ $x }}(k)
+ i++
+ }
+ sort.Sort({{ sorttype .MapKey false}}(v2))
+ for _, k2 := range v2 {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
+ {{if eq .MapKey "string"}}if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
+ if cr != nil { cr.sendContainerState(containerMapValue) }
+ {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
+ } {{end}}
+ } else {
+ for k2, v2 := range v {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
+ {{if eq .MapKey "string"}}if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }{{else}}{{ encmd .MapKey "k2"}}{{end}}
+ if cr != nil { cr.sendContainerState(containerMapValue) }
+ {{ encmd .Elem "v2"}}
+ }
+ }
+ if cr != nil { cr.sendContainerState(containerMapEnd) }{{/* ee.EncodeEnd() */}}
+}
+
+{{end}}{{end}}{{end}}
+
+// -- decode
+
+// -- -- fast path type switch
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+ case []{{ .Elem }}:{{else}}
+ case map[{{ .MapKey }}]{{ .Elem }}:{{end}}
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, d){{if not .MapKey }}
+ case *[]{{ .Elem }}:{{else}}
+ case *map[{{ .MapKey }}]{{ .Elem }}:{{end}}
+ v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+{{end}}{{end}}
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
+// -- -- fast path functions
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+{{/*
+Slices can change if they
+- did not come from an array
+- are addressable (from a ptr)
+- are settable (e.g. contained in an interface{})
+*/}}
+func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() { {{/* // CanSet => CanAddr + Exported */}}
+ vp := rv.Addr().Interface().(*[]{{ .Elem }})
+ v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]{{ .Elem }})
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, checkNil bool, d *Decoder) {
+ v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) {
+ dd := d.d
+ {{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}}
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []{{ .Elem }}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange { {{/*
+ // fast-path is for "basic" immutable types, so no need to copy them over
+ // s := make([]{{ .Elem }}, decInferLen(containerLenS, d.h.MaxInitLen))
+ // copy(s, v[:cap(v)])
+ // v = s */}}
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]{{ .Elem }}, xlen)
+ }
+ } else {
+ v = make([]{{ .Elem }}, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ } {{/* // all checks done. cannot go past len. */}}
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
+ }
+ if xtrunc { {{/* // means canChange=true, changed=true already. */}}
+ for ; j < containerLenS; j++ {
+ v = append(v, {{ zerocmd .Elem }})
+ slh.ElemContainerState(j)
+ {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak() {{/* check break first, so we can initialize v with a capacity of 4 if necessary */}}
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []{{ .Elem }}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]{{ .Elem }}, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, {{ zerocmd .Elem }})
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) { {{/* // all checks done. cannot go past len. */}}
+ {{ if eq .Elem "interface{}" }}d.decode(&v[j])
+ {{ else }}v[j] = {{ decmd .Elem }}{{ end }}
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+{{end}}{{end}}{{end}}
+
+
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+{{/*
+Maps can change if they are
+- addressable (from a ptr)
+- settable (e.g. contained in an interface{})
+*/}}
+func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[{{ .MapKey }}]{{ .Elem }})
+ v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[{{ .MapKey }}]{{ .Elem }})
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, d *Decoder) {
+ v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) {
+ dd := d.d
+ cr := d.cr
+ {{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}}
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }})
+ v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen)
+ changed = true
+ }
+ {{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}}
+ var mk {{ .MapKey }}
+ var mv {{ .Elem }}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
+ {{ if eq .MapKey "interface{}" }}mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
+ }{{ else }}mk = {{ decmd .MapKey }}{{ end }}
+ if cr != nil { cr.sendContainerState(containerMapValue) }
+ {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
+ d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
+ {{ if eq .MapKey "interface{}" }}mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
+ }{{ else }}mk = {{ decmd .MapKey }}{{ end }}
+ if cr != nil { cr.sendContainerState(containerMapValue) }
+ {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
+ d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil { cr.sendContainerState(containerMapEnd) }
+ return v, changed
+}
+
+{{end}}{{end}}{{end}}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.not.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.not.go
new file mode 100644
index 0000000..d6f5f0c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.not.go
@@ -0,0 +1,32 @@
+// +build notfastpath
+
+package codec
+
+import "reflect"
+
+// The generated fast-path code is very large, and adds a few seconds to the build time.
+// This causes test execution, execution of small tools which use codec, etc.,
+// to take a long time.
+//
+// To mitigate, we now support the notfastpath tag.
+// This tag disables fastpath during build, allowing for faster build, test execution,
+// short-program runs, etc.
+
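+// Illustrative usage (an assumption about the standard Go toolchain invocation,
+// not part of the original file): build or test with the tag to skip fast-path code.
+//
+//	go build -tags notfastpath ./...
+//	go test -tags notfastpath ./...
+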
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
+
+type fastpathT struct{}
+type fastpathE struct {
+ rtid uintptr
+ rt reflect.Type
+ encfn func(*encFnInfo, reflect.Value)
+ decfn func(*decFnInfo, reflect.Value)
+}
+type fastpathA [0]fastpathE
+
+func (x fastpathA) index(rtid uintptr) int { return -1 }
+
+var fastpathAV fastpathA
+var fastpathTV fastpathT
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl
new file mode 100644
index 0000000..32df541
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl
@@ -0,0 +1,104 @@
+{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
+{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}}
+var {{var "c"}} bool {{/* // changed */}}
+_ = {{var "c"}}{{end}}
+if {{var "l"}} == 0 {
+ {{if isSlice }}if {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ } else if len({{var "v"}}) != 0 {
+ {{var "v"}} = {{var "v"}}[:0]
+ {{var "c"}} = true
+ } {{end}} {{if isChan }}if {{var "v"}} == nil {
+ {{var "v"}} = make({{ .CTyp }}, 0)
+ {{var "c"}} = true
+ } {{end}}
+} else if {{var "l"}} > 0 {
+ {{if isChan }}if {{var "v"}} == nil {
+ {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ {{var "v"}} = make({{ .CTyp }}, {{var "rl"}})
+ {{var "c"}} = true
+ }
+ for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ {
+ {{var "h"}}.ElemContainerState({{var "r"}})
+ var {{var "t"}} {{ .Typ }}
+ {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
+ {{var "v"}} <- {{var "t"}}
+ }
+ {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
+ var {{var "rt"}} bool {{/* truncated */}}
+ _, _ = {{var "rl"}}, {{var "rt"}}
+ {{var "rr"}} = {{var "l"}} // len({{var "v"}})
+ if {{var "l"}} > cap({{var "v"}}) {
+ {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
+ {{ else }}{{if not .Immutable }}
+ {{var "rg"}} := len({{var "v"}}) > 0
+ {{var "v2"}} := {{var "v"}} {{end}}
+ {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ if {{var "rt"}} {
+ if {{var "rl"}} <= cap({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "rl"}}]
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ {{var "c"}} = true
+ {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }}
+ if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}}
+ } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "l"}}]
+ {{var "c"}} = true
+ } {{end}} {{/* end isSlice:47 */}}
+ {{var "j"}} := 0
+ for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ {
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ }
+ {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ z.DecSwallow()
+ }
+ {{ else }}if {{var "rt"}} {
+ for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
+ {{var "v"}} = append({{var "v"}}, {{ zero}})
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ }
+ } {{end}} {{/* end isArray:56 */}}
+ {{end}} {{/* end isChan:16 */}}
+} else { {{/* len < 0 */}}
+ {{var "j"}} := 0
+ for ; !r.CheckBreak(); {{var "j"}}++ {
+ {{if isChan }}
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ var {{var "t"}} {{ .Typ }}
+ {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
+ {{var "v"}} <- {{var "t"}}
+ {{ else }}
+ if {{var "j"}} >= len({{var "v"}}) {
+ {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
+ {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
+ {{var "c"}} = true {{end}}
+ }
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ if {{var "j"}} < len({{var "v"}}) {
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ } else {
+ z.DecSwallow()
+ }
+ {{end}}
+ }
+ {{if isSlice }}if {{var "j"}} < len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "j"}}]
+ {{var "c"}} = true
+ } else if {{var "j"}} == 0 && {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ }{{end}}
+}
+{{var "h"}}.End()
+{{if not isArray }}if {{var "c"}} {
+ *{{ .Varname }} = {{var "v"}}
+}{{end}}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
new file mode 100644
index 0000000..77400e0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
@@ -0,0 +1,58 @@
+{{var "v"}} := *{{ .Varname }}
+{{var "l"}} := r.ReadMapStart()
+{{var "bh"}} := z.DecBasicHandle()
+if {{var "v"}} == nil {
+ {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
+ {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
+ *{{ .Varname }} = {{var "v"}}
+}
+var {{var "mk"}} {{ .KTyp }}
+var {{var "mv"}} {{ .Typ }}
+var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
+if {{var "bh"}}.MapValueReset {
+ {{if decElemKindPtr}}{{var "mg"}} = true
+ {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
+ {{else if not decElemKindImmutable}}{{var "mg"}} = true
+ {{end}} }
+if {{var "l"}} > 0 {
+for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
+ {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
+{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+ {{var "mk"}} = string({{var "bv"}})
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true{{end}}
+ if {{var "mg"}} {
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
+ } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
+ {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
+ if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
+ {{var "v"}}[{{var "mk"}}] = {{var "mv"}}
+ }
+}
+} else if {{var "l"}} < 0 {
+for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
+ {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
+{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+ {{var "mk"}} = string({{var "bv"}})
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true {{ end }}
+ if {{var "mg"}} {
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
+ } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
+ {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
+ if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
+ {{var "v"}}[{{var "mk"}}] = {{var "mv"}}
+ }
+}
+} // else len==0: TODO: Should we clear map entries?
+z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }})
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.generated.go
new file mode 100644
index 0000000..22bce77
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.generated.go
@@ -0,0 +1,233 @@
+// //+build ignore
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl
+// ************************************************************
+
+package codec
+
+import (
+ "encoding"
+ "reflect"
+)
+
+// This file is used to generate helper code for codecgen.
+// The values here, i.e. genHelper(En|De)coder, are not to be used directly by
+// library users. They WILL change continuously and without notice.
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
+
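+// Illustrative sketch (an assumption about the shape of codecgen output; MyType is
+// hypothetical and this snippet is not part of this file): generated Selfer methods
+// obtain the helper and driver once, then encode statically.
+//
+//	func (x *MyType) CodecEncodeSelf(e *Encoder) {
+//		z, r := GenHelperEncoder(e)
+//		_, _ = z, r
+//		// ... statically generated encoding, using z for helpers and r as the encDriver ...
+//	}
+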
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
+ return genHelperEncoder{e: e}, e.e
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
+ return genHelperDecoder{d: d}, d.d
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+ e *Encoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperDecoder struct {
+ d *Decoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
+ return f.e.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinary() bool {
+ return f.e.be // f.e.hh.isBinaryEncoding()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncFallback(iv interface{}) {
+ // println(">>>>>>>>> EncFallback")
+ f.e.encodeI(iv, false, false)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
+ bs, fnerr := iv.MarshalText()
+ f.e.marshal(bs, fnerr, false, c_UTF8)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
+ bs, fnerr := iv.MarshalJSON()
+ f.e.marshal(bs, fnerr, true, c_UTF8)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
+ bs, fnerr := iv.MarshalBinary()
+ f.e.marshal(bs, fnerr, false, c_RAW)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
+ if _, ok := f.e.hh.(*BincHandle); ok {
+ return timeTypId
+ }
+ return 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) IsJSONHandle() bool {
+ return f.e.js
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) HasExtensions() bool {
+ return len(f.e.h.extHandle) != 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
+ rt := reflect.TypeOf(v)
+ if rt.Kind() == reflect.Ptr {
+ rt = rt.Elem()
+ }
+ rtid := reflect.ValueOf(rt).Pointer()
+ if xfFn := f.e.h.getExt(rtid); xfFn != nil {
+ f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
+ return true
+ }
+ return false
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncSendContainerState(c containerState) {
+ if f.e.cr != nil {
+ f.e.cr.sendContainerState(c)
+ }
+}
+
+// ---------------- DECODER FOLLOWS -----------------
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
+ return f.d.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinary() bool {
+ return f.d.be // f.d.hh.isBinaryEncoding()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSwallow() {
+ f.d.swallow()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchBuffer() []byte {
+ return f.d.b[:]
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
+ // println(">>>>>>>>> DecFallback")
+ f.d.decodeI(iv, chkPtr, false, false, false)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
+ return f.d.decSliceHelperStart()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
+ f.d.structFieldNotFound(index, name)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
+ f.d.arrayCannotExpand(sliceLen, streamLen)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
+ fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
+ // bs := f.dd.DecodeBytes(f.d.b[:], true, true)
+ // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
+ fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
+ if _, ok := f.d.hh.(*BincHandle); ok {
+ return timeTypId
+ }
+ return 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) IsJSONHandle() bool {
+ return f.d.js
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) HasExtensions() bool {
+ return len(f.d.h.extHandle) != 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
+ rt := reflect.TypeOf(v).Elem()
+ rtid := reflect.ValueOf(rt).Pointer()
+ if xfFn := f.d.h.getExt(rtid); xfFn != nil {
+ f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
+ return true
+ }
+ return false
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
+ return decInferLen(clen, maxlen, unit)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSendContainerState(c containerState) {
+ if f.d.cr != nil {
+ f.d.cr.sendContainerState(c)
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl
new file mode 100644
index 0000000..3195857
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl
@@ -0,0 +1,364 @@
+// //+build ignore
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl
+// ************************************************************
+
+package codec
+
+import (
+ "encoding"
+ "reflect"
+)
+
+// This file is used to generate helper code for codecgen.
+// The values here, i.e. genHelper(En|De)coder, are not to be used directly by
+// library users. They WILL change continuously and without notice.
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
+
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
+ return genHelperEncoder{e:e}, e.e
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
+ return genHelperDecoder{d:d}, d.d
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+ e *Encoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperDecoder struct {
+ d *Decoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
+ return f.e.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinary() bool {
+ return f.e.be // f.e.hh.isBinaryEncoding()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncFallback(iv interface{}) {
+ // println(">>>>>>>>> EncFallback")
+ f.e.encodeI(iv, false, false)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
+ bs, fnerr := iv.MarshalText()
+ f.e.marshal(bs, fnerr, false, c_UTF8)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
+ bs, fnerr := iv.MarshalJSON()
+ f.e.marshal(bs, fnerr, true, c_UTF8)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
+ bs, fnerr := iv.MarshalBinary()
+ f.e.marshal(bs, fnerr, false, c_RAW)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
+ if _, ok := f.e.hh.(*BincHandle); ok {
+ return timeTypId
+ }
+ return 0
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) IsJSONHandle() bool {
+ return f.e.js
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) HasExtensions() bool {
+ return len(f.e.h.extHandle) != 0
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
+ rt := reflect.TypeOf(v)
+ if rt.Kind() == reflect.Ptr {
+ rt = rt.Elem()
+ }
+ rtid := reflect.ValueOf(rt).Pointer()
+ if xfFn := f.e.h.getExt(rtid); xfFn != nil {
+ f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
+ return true
+ }
+ return false
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncSendContainerState(c containerState) {
+ if f.e.cr != nil {
+ f.e.cr.sendContainerState(c)
+ }
+}
+
+// ---------------- DECODER FOLLOWS -----------------
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
+ return f.d.h
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinary() bool {
+ return f.d.be // f.d.hh.isBinaryEncoding()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSwallow() {
+ f.d.swallow()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchBuffer() []byte {
+ return f.d.b[:]
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
+ // println(">>>>>>>>> DecFallback")
+ f.d.decodeI(iv, chkPtr, false, false, false)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
+ return f.d.decSliceHelperStart()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
+ f.d.structFieldNotFound(index, name)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
+ f.d.arrayCannotExpand(sliceLen, streamLen)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
+ fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
+ // bs := f.dd.DecodeBytes(f.d.b[:], true, true)
+ // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
+ fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
+ if _, ok := f.d.hh.(*BincHandle); ok {
+ return timeTypId
+ }
+ return 0
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) IsJSONHandle() bool {
+ return f.d.js
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) HasExtensions() bool {
+ return len(f.d.h.extHandle) != 0
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
+ rt := reflect.TypeOf(v).Elem()
+ rtid := reflect.ValueOf(rt).Pointer()
+ if xfFn := f.d.h.getExt(rtid); xfFn != nil {
+ f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
+ return true
+ }
+ return false
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
+ return decInferLen(clen, maxlen, unit)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSendContainerState(c containerState) {
+ if f.d.cr != nil {
+ f.d.cr.sendContainerState(c)
+ }
+}
+
+{{/*
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncDriver() encDriver {
+ return f.e.e
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecDriver() decDriver {
+ return f.d.d
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncNil() {
+ f.e.e.EncodeNil()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBytes(v []byte) {
+ f.e.e.EncodeStringBytes(c_RAW, v)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncArrayStart(length int) {
+ f.e.e.EncodeArrayStart(length)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncArrayEnd() {
+ f.e.e.EncodeArrayEnd()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncArrayEntrySeparator() {
+ f.e.e.EncodeArrayEntrySeparator()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncMapStart(length int) {
+ f.e.e.EncodeMapStart(length)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncMapEnd() {
+ f.e.e.EncodeMapEnd()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncMapEntrySeparator() {
+ f.e.e.EncodeMapEntrySeparator()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncMapKVSeparator() {
+ f.e.e.EncodeMapKVSeparator()
+}
+
+// ---------
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBytes(v *[]byte) {
+ *v = f.d.d.DecodeBytes(*v)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecTryNil() bool {
+ return f.d.d.TryDecodeAsNil()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecContainerIsNil() (b bool) {
+ return f.d.d.IsContainerType(valueTypeNil)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecContainerIsMap() (b bool) {
+ return f.d.d.IsContainerType(valueTypeMap)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecContainerIsArray() (b bool) {
+ return f.d.d.IsContainerType(valueTypeArray)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecCheckBreak() bool {
+ return f.d.d.CheckBreak()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecMapStart() int {
+ return f.d.d.ReadMapStart()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayStart() int {
+ return f.d.d.ReadArrayStart()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecMapEnd() {
+ f.d.d.ReadMapEnd()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayEnd() {
+ f.d.d.ReadArrayEnd()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayEntrySeparator() {
+ f.d.d.ReadArrayEntrySeparator()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecMapEntrySeparator() {
+ f.d.d.ReadMapEntrySeparator()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecMapKVSeparator() {
+ f.d.d.ReadMapKVSeparator()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) ReadStringAsBytes(bs []byte) []byte {
+ return f.d.d.DecodeStringAsBytes(bs)
+}
+
+
+// -- encode calls (primitives)
+{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) {{ .MethodNamePfx "Enc" true }}(v {{ .Primitive }}) {
+ ee := f.e.e
+ {{ encmd .Primitive "v" }}
+}
+{{ end }}{{ end }}{{ end }}
+
+// -- decode calls (primitives)
+{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) {{ .MethodNamePfx "Dec" true }}(vp *{{ .Primitive }}) {
+ dd := f.d.d
+ *vp = {{ decmd .Primitive }}
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) {{ .MethodNamePfx "Read" true }}() (v {{ .Primitive }}) {
+ dd := f.d.d
+ v = {{ decmd .Primitive }}
+ return
+}
+{{ end }}{{ end }}{{ end }}
+
+
+// -- encode calls (slices/maps)
+{{range .Values}}{{if not .Primitive }}{{if .Slice }}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v []{{ .Elem }}) { {{ else }}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v map[{{ .MapKey }}]{{ .Elem }}) { {{end}}
+ f.F.{{ .MethodNamePfx "Enc" false }}V(v, false, f.e)
+}
+{{ end }}{{ end }}
+
+// -- decode calls (slices/maps)
+{{range .Values}}{{if not .Primitive }}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+{{if .Slice }}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *[]{{ .Elem }}) {
+{{else}}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *map[{{ .MapKey }}]{{ .Elem }}) { {{end}}
+ v, changed := f.F.{{ .MethodNamePfx "Dec" false }}V(*vp, false, true, f.d)
+ if changed {
+ *vp = v
+ }
+}
+{{ end }}{{ end }}
+*/}}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/gen.generated.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen.generated.go
new file mode 100644
index 0000000..2ace97b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen.generated.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
+
+const genDecMapTmpl = `
+{{var "v"}} := *{{ .Varname }}
+{{var "l"}} := r.ReadMapStart()
+{{var "bh"}} := z.DecBasicHandle()
+if {{var "v"}} == nil {
+ {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
+ {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
+ *{{ .Varname }} = {{var "v"}}
+}
+var {{var "mk"}} {{ .KTyp }}
+var {{var "mv"}} {{ .Typ }}
+var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
+if {{var "bh"}}.MapValueReset {
+ {{if decElemKindPtr}}{{var "mg"}} = true
+ {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
+ {{else if not decElemKindImmutable}}{{var "mg"}} = true
+ {{end}} }
+if {{var "l"}} > 0 {
+for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
+ {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
+{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+ {{var "mk"}} = string({{var "bv"}})
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true{{end}}
+ if {{var "mg"}} {
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
+ } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
+ {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
+ if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
+ {{var "v"}}[{{var "mk"}}] = {{var "mv"}}
+ }
+}
+} else if {{var "l"}} < 0 {
+for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
+ {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
+{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+ {{var "mk"}} = string({{var "bv"}})
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true {{ end }}
+ if {{var "mg"}} {
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
+ } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
+ {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
+ if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
+ {{var "v"}}[{{var "mk"}}] = {{var "mv"}}
+ }
+}
+} // else len==0: TODO: Should we clear map entries?
+z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }})
+`
+
+const genDecListTmpl = `
+{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
+{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}}
+var {{var "c"}} bool {{/* // changed */}}
+_ = {{var "c"}}{{end}}
+if {{var "l"}} == 0 {
+ {{if isSlice }}if {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ } else if len({{var "v"}}) != 0 {
+ {{var "v"}} = {{var "v"}}[:0]
+ {{var "c"}} = true
+ } {{end}} {{if isChan }}if {{var "v"}} == nil {
+ {{var "v"}} = make({{ .CTyp }}, 0)
+ {{var "c"}} = true
+ } {{end}}
+} else if {{var "l"}} > 0 {
+ {{if isChan }}if {{var "v"}} == nil {
+ {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ {{var "v"}} = make({{ .CTyp }}, {{var "rl"}})
+ {{var "c"}} = true
+ }
+ for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ {
+ {{var "h"}}.ElemContainerState({{var "r"}})
+ var {{var "t"}} {{ .Typ }}
+ {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
+ {{var "v"}} <- {{var "t"}}
+ }
+ {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
+ var {{var "rt"}} bool {{/* truncated */}}
+ _, _ = {{var "rl"}}, {{var "rt"}}
+ {{var "rr"}} = {{var "l"}} // len({{var "v"}})
+ if {{var "l"}} > cap({{var "v"}}) {
+ {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
+ {{ else }}{{if not .Immutable }}
+ {{var "rg"}} := len({{var "v"}}) > 0
+ {{var "v2"}} := {{var "v"}} {{end}}
+ {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ if {{var "rt"}} {
+ if {{var "rl"}} <= cap({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "rl"}}]
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ {{var "c"}} = true
+ {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }}
+ if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}}
+ } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "l"}}]
+ {{var "c"}} = true
+ } {{end}} {{/* end isSlice:47 */}}
+ {{var "j"}} := 0
+ for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ {
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ }
+ {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ z.DecSwallow()
+ }
+ {{ else }}if {{var "rt"}} {
+ for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
+ {{var "v"}} = append({{var "v"}}, {{ zero}})
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ }
+ } {{end}} {{/* end isArray:56 */}}
+ {{end}} {{/* end isChan:16 */}}
+} else { {{/* len < 0 */}}
+ {{var "j"}} := 0
+ for ; !r.CheckBreak(); {{var "j"}}++ {
+ {{if isChan }}
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ var {{var "t"}} {{ .Typ }}
+ {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
+ {{var "v"}} <- {{var "t"}}
+ {{ else }}
+ if {{var "j"}} >= len({{var "v"}}) {
+ {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
+ {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
+ {{var "c"}} = true {{end}}
+ }
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ if {{var "j"}} < len({{var "v"}}) {
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ } else {
+ z.DecSwallow()
+ }
+ {{end}}
+ }
+ {{if isSlice }}if {{var "j"}} < len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "j"}}]
+ {{var "c"}} = true
+ } else if {{var "j"}} == 0 && {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ }{{end}}
+}
+{{var "h"}}.End()
+{{if not isArray }}if {{var "c"}} {
+ *{{ .Varname }} = {{var "v"}}
+}{{end}}
+`
+
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/gen.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen.go
new file mode 100644
index 0000000..ffc5aec
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen.go
@@ -0,0 +1,1997 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "go/format"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "text/template"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+// ---------------------------------------------------
+// codecgen supports the full cycle of reflection-based codec:
+// - RawExt
+// - Builtins
+// - Extensions
+// - (Binary|Text|JSON)(Unm|M)arshal
+// - generic by-kind
+//
+// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type.
+// In those areas, we try to only do reflection or interface-conversion when NECESSARY:
+// - Extensions, only if Extensions are configured.
+//
+// However, codecgen doesn't support the following:
+// - Canonical option. (codecgen IGNORES it currently)
+// This is just because it has not been implemented.
+//
+// During encode/decode, Selfer takes precedence.
+// A type implementing Selfer will know how to encode/decode itself statically.
+//
+// The following field types are supported:
+// array: [n]T
+// slice: []T
+// map: map[K]V
+// primitive: [u]int[n], float(32|64), bool, string
+// struct
+//
+// ---------------------------------------------------
+// Note that a Selfer cannot call (e|d).(En|De)code on itself,
+// as this will cause a circular reference, as (En|De)code will call Selfer methods.
+// Any type that implements Selfer must implement it completely and not fall back to (En|De)code.
+//
+// In addition, code in this file manages the generation of fast-path implementations of
+// encode/decode of slices/maps of primitive keys/values.
+//
+// Users MUST re-generate their implementations whenever the code shape changes.
+// The generated code will panic if it was generated with a version older than the supporting library.
+// ---------------------------------------------------
+//
+// codec framework is very feature rich.
+// When encoding or decoding into an interface, it depends on the runtime type of the interface.
+// The type of the interface may be a named type, an extension, etc.
+// Consequently, we fall back to the runtime codec for encoding/decoding interfaces.
+// In addition, we fall back for any value which cannot be guaranteed at runtime.
+// This allows us to support ANY value, including any named types, specifically those which
+// do not implement our interfaces (e.g. Selfer).
+//
+// This explains some slowness compared to other code generation codecs (e.g. msgp).
+// This reduction in speed is only seen when your code refers to interfaces,
+// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} }
+//
+// Code generated by codecgen will panic if it was generated with a version of the library different from the one in use.
+//
+// Note:
+// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
+// This way, there isn't a function call overhead just to see that we should not enter a block of code.
+
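+// As a minimal sketch (an assumption for illustration only; MyStruct and the bodies are
+// hypothetical): for a user type, codecgen emits Selfer methods of roughly this shape,
+// which the Encoder/Decoder then call instead of using reflection:
+//
+//	func (x *MyStruct) CodecEncodeSelf(e *Encoder) { /* statically generated encode */ }
+//	func (x *MyStruct) CodecDecodeSelf(d *Decoder) { /* statically generated decode */ }
+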
+// GenVersion is the current version of codecgen.
+//
+// NOTE: Increment this value each time codecgen changes fundamentally.
+// Fundamental changes are:
+// - helper methods change (signature change, new ones added, some removed, etc)
+// - codecgen command line changes
+//
+// v1: Initial Version
+// v2:
+// v3: Changes for Kubernetes:
+// changes in signature of some unpublished helper methods and codecgen cmdline arguments.
+// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen)
+// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections.
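+//
+// For illustration (an assumption about the generated output, based on the init check
+// emitted by Gen below): each generated file carries a version guard of roughly this
+// shape, so a stale file fails fast at program start:
+//
+//	func init() {
+//		if codec1978.GenVersion != 5 {
+//			panic("codecgen version mismatch: re-generate this file")
+//		}
+//	}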
+const GenVersion = 5
+
+const (
+ genCodecPkg = "codec1978"
+ genTempVarPfx = "yy"
+ genTopLevelVarName = "x"
+
+ // ignore canBeNil parameter, and always set to true.
+ // This is because nil can appear anywhere, so we should always check.
+ genAnythingCanBeNil = true
+
+ // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function;
+ // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals
+ // are not executed a lot.
+ //
+ // From testing, it didn't make much difference in runtime, so keep as true (one function only)
+ genUseOneFunctionForDecStructMap = true
+)
+
+type genStructMapStyle uint8
+
+const (
+ genStructMapStyleConsolidated genStructMapStyle = iota
+ genStructMapStyleLenPrefix
+ genStructMapStyleCheckBreak
+)
+
+var (
+ genAllTypesSamePkgErr = errors.New("All types must be in the same package")
+ genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice")
+ genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__")
+ genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`)
+)
+
+// genRunner holds some state used during a Gen run.
+type genRunner struct {
+ w io.Writer // output
+ c uint64 // counter used for generating varsfx
+ t []reflect.Type // list of types to run selfer on
+
+ tc reflect.Type // currently running selfer on this type
+ te map[uintptr]bool // types for which the encoder has been created
+ td map[uintptr]bool // types for which the decoder has been created
+ cp string // codec import path
+
+ im map[string]reflect.Type // imports to add
+ imn map[string]string // package names of imports to add
+ imc uint64 // counter for import numbers
+
+ is map[reflect.Type]struct{} // types seen during import search
+ bp string // base PkgPath, for which we are generating for
+
+ cpfx string // codec package prefix
+ unsafe bool // is unsafe to be used in generated code?
+
+ tm map[reflect.Type]struct{} // types for which enc/dec must be generated
+ ts []reflect.Type // types for which enc/dec must be generated
+
+ xs string // top level variable/constant suffix
+ hn string // fn helper type name
+
+ ti *TypeInfos
+ // rr *rand.Rand // random generator for file-specific types
+}
+
+// Gen will write a complete go file containing Selfer implementations for each
+// type passed. All the types must be in the same package.
+//
+// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.*
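+//
+// Illustrative invocation (an assumption, not from this file; MyStruct is hypothetical):
+//
+//	var buf bytes.Buffer
+//	Gen(&buf, "", "mypkg", "1234", false, nil, reflect.TypeOf(MyStruct{}))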
+func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) {
+ if len(typ) == 0 {
+ return
+ }
+ x := genRunner{
+ unsafe: useUnsafe,
+ w: w,
+ t: typ,
+ te: make(map[uintptr]bool),
+ td: make(map[uintptr]bool),
+ im: make(map[string]reflect.Type),
+ imn: make(map[string]string),
+ is: make(map[reflect.Type]struct{}),
+ tm: make(map[reflect.Type]struct{}),
+ ts: []reflect.Type{},
+ bp: genImportPath(typ[0]),
+ xs: uid,
+ ti: ti,
+ }
+ if x.ti == nil {
+ x.ti = defTypeInfos
+ }
+ if x.xs == "" {
+ rr := rand.New(rand.NewSource(time.Now().UnixNano()))
+ x.xs = strconv.FormatInt(rr.Int63n(9999), 10)
+ }
+
+ // gather imports first:
+ x.cp = genImportPath(reflect.TypeOf(x))
+ x.imn[x.cp] = genCodecPkg
+ for _, t := range typ {
+ // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name())
+ if genImportPath(t) != x.bp {
+ panic(genAllTypesSamePkgErr)
+ }
+ x.genRefPkgs(t)
+ }
+ if buildTags != "" {
+ x.line("//+build " + buildTags)
+ x.line("")
+ }
+ x.line(`
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+`)
+ x.line("package " + pkgName)
+ x.line("")
+ x.line("import (")
+ if x.cp != x.bp {
+ x.cpfx = genCodecPkg + "."
+ x.linef("%s \"%s\"", genCodecPkg, x.cp)
+ }
+ // use a sorted set of im keys, so that we can get consistent output
+ imKeys := make([]string, 0, len(x.im))
+ for k, _ := range x.im {
+ imKeys = append(imKeys, k)
+ }
+ sort.Strings(imKeys)
+ for _, k := range imKeys { // for k, _ := range x.im {
+ x.linef("%s \"%s\"", x.imn[k], k)
+ }
+ // add required packages
+ for _, k := range [...]string{"reflect", "unsafe", "runtime", "fmt", "errors"} {
+ if _, ok := x.im[k]; !ok {
+ if k == "unsafe" && !x.unsafe {
+ continue
+ }
+ x.line("\"" + k + "\"")
+ }
+ }
+ x.line(")")
+ x.line("")
+
+ x.line("const (")
+ x.linef("// ----- content types ----")
+ x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8))
+ x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW))
+ x.linef("// ----- value types used ----")
+ x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray))
+ x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap))
+ x.linef("// ----- containerStateValues ----")
+ x.linef("codecSelfer_containerMapKey%s = %v", x.xs, int64(containerMapKey))
+ x.linef("codecSelfer_containerMapValue%s = %v", x.xs, int64(containerMapValue))
+ x.linef("codecSelfer_containerMapEnd%s = %v", x.xs, int64(containerMapEnd))
+ x.linef("codecSelfer_containerArrayElem%s = %v", x.xs, int64(containerArrayElem))
+ x.linef("codecSelfer_containerArrayEnd%s = %v", x.xs, int64(containerArrayEnd))
+ x.line(")")
+ x.line("var (")
+ x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())")
+ x.line("codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)")
+ x.line(")")
+ x.line("")
+
+ if x.unsafe {
+ x.line("type codecSelferUnsafeString" + x.xs + " struct { Data uintptr; Len int}")
+ x.line("")
+ }
+ x.hn = "codecSelfer" + x.xs
+ x.line("type " + x.hn + " struct{}")
+ x.line("")
+
+ x.varsfxreset()
+ x.line("func init() {")
+ x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion)
+ x.line("_, file, _, _ := runtime.Caller(0)")
+ x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `)
+ x.linef(`%v, %sGenVersion, file)`, GenVersion, x.cpfx)
+ x.line("panic(err)")
+ x.linef("}")
+ x.line("if false { // reference the types, but skip this branch at build/run time")
+ var n int
+ // for k, t := range x.im {
+ for _, k := range imKeys {
+ t := x.im[k]
+ x.linef("var v%v %s.%s", n, x.imn[k], t.Name())
+ n++
+ }
+ if x.unsafe {
+ x.linef("var v%v unsafe.Pointer", n)
+ n++
+ }
+ if n > 0 {
+ x.out("_")
+ for i := 1; i < n; i++ {
+ x.out(", _")
+ }
+ x.out(" = v0")
+ for i := 1; i < n; i++ {
+ x.outf(", v%v", i)
+ }
+ }
+ x.line("} ") // close if false
+ x.line("}") // close init
+ x.line("")
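+	// For illustration only (the codec package alias, the version number N and the
+	// pkgN_foo alias below are placeholders, not the exact values emitted), the
+	// init block generated above looks roughly like:
+	//
+	//	func init() {
+	//		if codecpkg.GenVersion != N { ... panic(err) }
+	//		if false { // reference the types, but skip this branch at build/run time
+	//			var v0 pkgN_foo.Bar
+	//			var v1 unsafe.Pointer
+	//			_, _ = v0, v1
+	//		}
+	//	}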
+
+ // generate rest of type info
+ for _, t := range typ {
+ x.tc = t
+ x.selfer(true)
+ x.selfer(false)
+ }
+
+ for _, t := range x.ts {
+ rtid := reflect.ValueOf(t).Pointer()
+ // generate enc functions for all these slice/map types.
+ x.varsfxreset()
+ x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx)
+ x.genRequiredMethodVars(true)
+ switch t.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ x.encListFallback("v", t)
+ case reflect.Map:
+ x.encMapFallback("v", t)
+ default:
+ panic(genExpectArrayOrMapErr)
+ }
+ x.line("}")
+ x.line("")
+
+ // generate dec functions for all these slice/map types.
+ x.varsfxreset()
+ x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx)
+ x.genRequiredMethodVars(false)
+ switch t.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ x.decListFallback("v", rtid, t)
+ case reflect.Map:
+ x.decMapFallback("v", rtid, t)
+ default:
+ panic(genExpectArrayOrMapErr)
+ }
+ x.line("}")
+ x.line("")
+ }
+
+ x.line("")
+}
+
+func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool {
+ // return varname != genTopLevelVarName && t != x.tc
+ // the only time we checkForSelfer is if we are not at the TOP of the generated code.
+ return varname != genTopLevelVarName
+}
+
+func (x *genRunner) arr2str(t reflect.Type, s string) string {
+ if t.Kind() == reflect.Array {
+ return s
+ }
+ return ""
+}
+
+func (x *genRunner) genRequiredMethodVars(encode bool) {
+ x.line("var h " + x.hn)
+ if encode {
+ x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)")
+ } else {
+ x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)")
+ }
+ x.line("_, _, _ = h, z, r")
+}
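+
+// For illustration, every method emitted via genRequiredMethodVars therefore starts
+// with a prologue of roughly this shape (the numeric suffix and the codec package
+// alias are placeholders):
+//
+//	var h codecSelfer1234
+//	z, r := codecpkg.GenHelperEncoder(e) // or GenHelperDecoder(d) when decoding
+//	_, _, _ = h, z, r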
+
+func (x *genRunner) genRefPkgs(t reflect.Type) {
+ if _, ok := x.is[t]; ok {
+ return
+ }
+ // fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name())
+ x.is[t] = struct{}{}
+ tpkg, tname := genImportPath(t), t.Name()
+ if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' {
+ if _, ok := x.im[tpkg]; !ok {
+ x.im[tpkg] = t
+ if idx := strings.LastIndex(tpkg, "/"); idx < 0 {
+ x.imn[tpkg] = tpkg
+ } else {
+ x.imc++
+ x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false)
+ }
+ }
+ }
+ switch t.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan:
+ x.genRefPkgs(t.Elem())
+ case reflect.Map:
+ x.genRefPkgs(t.Elem())
+ x.genRefPkgs(t.Key())
+ case reflect.Struct:
+ for i := 0; i < t.NumField(); i++ {
+ if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' {
+ x.genRefPkgs(t.Field(i).Type)
+ }
+ }
+ }
+}
+
+func (x *genRunner) line(s string) {
+ x.out(s)
+ if len(s) == 0 || s[len(s)-1] != '\n' {
+ x.out("\n")
+ }
+}
+
+func (x *genRunner) varsfx() string {
+ x.c++
+ return strconv.FormatUint(x.c, 10)
+}
+
+func (x *genRunner) varsfxreset() {
+ x.c = 0
+}
+
+func (x *genRunner) out(s string) {
+ if _, err := io.WriteString(x.w, s); err != nil {
+ panic(err)
+ }
+}
+
+func (x *genRunner) linef(s string, params ...interface{}) {
+ x.line(fmt.Sprintf(s, params...))
+}
+
+func (x *genRunner) outf(s string, params ...interface{}) {
+ x.out(fmt.Sprintf(s, params...))
+}
+
+func (x *genRunner) genTypeName(t reflect.Type) (n string) {
+ // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }()
+
+ // if the type has a PkgPath, which doesn't match the current package,
+ // then include it.
+ // We cannot depend on t.String() because it includes current package,
+ // or t.PkgPath because it includes full import path,
+ //
+ var ptrPfx string
+ for t.Kind() == reflect.Ptr {
+ ptrPfx += "*"
+ t = t.Elem()
+ }
+ if tn := t.Name(); tn != "" {
+ return ptrPfx + x.genTypeNamePrim(t)
+ }
+ switch t.Kind() {
+ case reflect.Map:
+ return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem())
+ case reflect.Slice:
+ return ptrPfx + "[]" + x.genTypeName(t.Elem())
+ case reflect.Array:
+ return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem())
+ case reflect.Chan:
+ return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem())
+ default:
+ if t == intfTyp {
+ return ptrPfx + "interface{}"
+ } else {
+ return ptrPfx + x.genTypeNamePrim(t)
+ }
+ }
+}
+
+func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) {
+ if t.Name() == "" {
+ return t.String()
+ } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) {
+ return t.Name()
+ } else {
+ return x.imn[genImportPath(t)] + "." + t.Name()
+ // return t.String() // best way to get the package name inclusive
+ }
+}
+
+func (x *genRunner) genZeroValueR(t reflect.Type) string {
+	// return the Go literal for the zero value of t, based on its kind.
+ switch t.Kind() {
+ case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func,
+ reflect.Slice, reflect.Map, reflect.Invalid:
+ return "nil"
+ case reflect.Bool:
+ return "false"
+ case reflect.String:
+ return `""`
+ case reflect.Struct, reflect.Array:
+ return x.genTypeName(t) + "{}"
+ default: // all numbers
+ return "0"
+ }
+}
+
+func (x *genRunner) genMethodNameT(t reflect.Type) (s string) {
+ return genMethodNameT(t, x.tc)
+}
+
+func (x *genRunner) selfer(encode bool) {
+ t := x.tc
+ t0 := t
+ // always make decode use a pointer receiver,
+ // and structs always use a ptr receiver (encode|decode)
+ isptr := !encode || t.Kind() == reflect.Struct
+ x.varsfxreset()
+ fnSigPfx := "func (x "
+ if isptr {
+ fnSigPfx += "*"
+ }
+ fnSigPfx += x.genTypeName(t)
+
+ x.out(fnSigPfx)
+ if isptr {
+ t = reflect.PtrTo(t)
+ }
+ if encode {
+ x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {")
+ x.genRequiredMethodVars(true)
+ // x.enc(genTopLevelVarName, t)
+ x.encVar(genTopLevelVarName, t)
+ } else {
+ x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ // do not use decVar, as there is no need to check TryDecodeAsNil
+ // or way to elegantly handle that, and also setting it to a
+ // non-nil value doesn't affect the pointer passed.
+ // x.decVar(genTopLevelVarName, t, false)
+ x.dec(genTopLevelVarName, t0)
+ }
+ x.line("}")
+ x.line("")
+
+ if encode || t0.Kind() != reflect.Struct {
+ return
+ }
+
+	// write containerMap
+ if genUseOneFunctionForDecStructMap {
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleConsolidated)
+ x.line("}")
+ x.line("")
+ } else {
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleLenPrefix)
+ x.line("}")
+ x.line("")
+
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleCheckBreak)
+ x.line("}")
+ x.line("")
+ }
+
+ // write containerArray
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructArray(genTopLevelVarName, "l", "return", reflect.ValueOf(t0).Pointer(), t0)
+ x.line("}")
+ x.line("")
+
+}
+
+// used for chan, array, slice, map
+func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) {
+ if encode {
+ x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), varname)
+ } else {
+ x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname)
+ }
+ x.registerXtraT(t)
+}
+
+func (x *genRunner) registerXtraT(t reflect.Type) {
+ // recursively register the types
+ if _, ok := x.tm[t]; ok {
+ return
+ }
+ var tkey reflect.Type
+ switch t.Kind() {
+ case reflect.Chan, reflect.Slice, reflect.Array:
+ case reflect.Map:
+ tkey = t.Key()
+ default:
+ return
+ }
+ x.tm[t] = struct{}{}
+ x.ts = append(x.ts, t)
+ // check if this refers to any xtra types eg. a slice of array: add the array
+ x.registerXtraT(t.Elem())
+ if tkey != nil {
+ x.registerXtraT(tkey)
+ }
+}
+
+// encVar will encode a variable.
+// The parameter, t, is the reflect.Type of the variable itself
+func (x *genRunner) encVar(varname string, t reflect.Type) {
+ // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t)
+ var checkNil bool
+ switch t.Kind() {
+ case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan:
+ checkNil = true
+ }
+ if checkNil {
+ x.linef("if %s == nil { r.EncodeNil() } else { ", varname)
+ }
+ switch t.Kind() {
+ case reflect.Ptr:
+ switch t.Elem().Kind() {
+ case reflect.Struct, reflect.Array:
+ x.enc(varname, genNonPtr(t))
+ default:
+ i := x.varsfx()
+ x.line(genTempVarPfx + i + " := *" + varname)
+ x.enc(genTempVarPfx+i, genNonPtr(t))
+ }
+ case reflect.Struct, reflect.Array:
+ i := x.varsfx()
+ x.line(genTempVarPfx + i + " := &" + varname)
+ x.enc(genTempVarPfx+i, t)
+ default:
+ x.enc(varname, t)
+ }
+
+ if checkNil {
+ x.line("}")
+ }
+
+}
+
+// enc will encode a variable (varname) of type t,
+// except when t is of kind reflect.Struct or reflect.Array, in which case varname is of type ptrTo(T) (to prevent copying).
+func (x *genRunner) enc(varname string, t reflect.Type) {
+ rtid := reflect.ValueOf(t).Pointer()
+	// We call CodecEncodeSelf if one of the following holds:
+ // - the type already implements Selfer, call that
+ // - the type has a Selfer implementation just created, use that
+ // - the type is in the list of the ones we will generate for, but it is not currently being generated
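+	//
+	// For illustration: when one of those conditions holds, the emitted code for a
+	// value x.F (a placeholder name) is simply:
+	//	x.F.CodecEncodeSelf(e)
+	// or, when only the pointer type implements Selfer:
+	//	yysf1 := &x.F
+	//	yysf1.CodecEncodeSelf(e)
+	// (the yy prefix and numeric suffix are placeholders for the generated temp-var names).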
+
+ mi := x.varsfx()
+ tptr := reflect.PtrTo(t)
+ tk := t.Kind()
+ if x.checkForSelfer(t, varname) {
+ if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T
+ if tptr.Implements(selferTyp) || t.Implements(selferTyp) {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ }
+ } else { // varname is of type T
+ if t.Implements(selferTyp) {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ } else if tptr.Implements(selferTyp) {
+ x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname)
+ x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi)
+ return
+ }
+ }
+
+ if _, ok := x.te[rtid]; ok {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ }
+ }
+
+ inlist := false
+ for _, t0 := range x.t {
+ if t == t0 {
+ inlist = true
+ if x.checkForSelfer(t, varname) {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ }
+ break
+ }
+ }
+
+ var rtidAdded bool
+ if t == x.tc {
+ x.te[rtid] = true
+ rtidAdded = true
+ }
+
+ // check if
+ // - type is RawExt
+ // - the type implements (Text|JSON|Binary)(Unm|M)arshal
+ x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi)
+ x.linef("_ = %sm%s", genTempVarPfx, mi)
+ x.line("if false {") //start if block
+ defer func() { x.line("}") }() //end if block
+
+ if t == rawExtTyp {
+ x.linef("} else { r.EncodeRawExt(%v, e)", varname)
+ return
+ }
+ // HACK: Support for Builtins.
+ // Currently, only Binc supports builtins, and the only builtin type is time.Time.
+ // Have a method that returns the rtid for time.Time if Handle is Binc.
+ if t == timeTyp {
+ vrtid := genTempVarPfx + "m" + x.varsfx()
+ x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid)
+ x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname)
+ }
+ // only check for extensions if the type is named, and has a packagePath.
+ if genImportPath(t) != "" && t.Name() != "" {
+		// first check if extensions are configured, before doing the interface conversion
+ x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname)
+ }
+ if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T
+ if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) {
+ x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname)
+ }
+ if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname)
+ } else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) {
+ x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname)
+ }
+ } else { // varname is of type T
+ if t.Implements(binaryMarshalerTyp) {
+ x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname)
+ } else if tptr.Implements(binaryMarshalerTyp) {
+ x.linef("} else if %sm%s { z.EncBinaryMarshal(&%v) ", genTempVarPfx, mi, varname)
+ }
+ if t.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname)
+ } else if tptr.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", genTempVarPfx, mi, varname)
+ } else if t.Implements(textMarshalerTyp) {
+ x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname)
+ } else if tptr.Implements(textMarshalerTyp) {
+ x.linef("} else if !%sm%s { z.EncTextMarshal(&%v) ", genTempVarPfx, mi, varname)
+ }
+ }
+ x.line("} else {")
+
+ switch t.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x.line("r.EncodeInt(int64(" + varname + "))")
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x.line("r.EncodeUint(uint64(" + varname + "))")
+ case reflect.Float32:
+ x.line("r.EncodeFloat32(float32(" + varname + "))")
+ case reflect.Float64:
+ x.line("r.EncodeFloat64(float64(" + varname + "))")
+ case reflect.Bool:
+ x.line("r.EncodeBool(bool(" + varname + "))")
+ case reflect.String:
+ x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(" + varname + "))")
+ case reflect.Chan:
+ x.xtraSM(varname, true, t)
+ // x.encListFallback(varname, rtid, t)
+ case reflect.Array:
+ x.xtraSM(varname, true, t)
+ case reflect.Slice:
+ // if nil, call dedicated function
+ // if a []uint8, call dedicated function
+ // if a known fastpath slice, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ if rtid == uint8SliceTypId {
+ x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))")
+ } else if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)")
+ } else {
+ x.xtraSM(varname, true, t)
+ // x.encListFallback(varname, rtid, t)
+ }
+ case reflect.Map:
+ // if nil, call dedicated function
+ // if a known fastpath map, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ")
+ if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)")
+ } else {
+ x.xtraSM(varname, true, t)
+ // x.encMapFallback(varname, rtid, t)
+ }
+ case reflect.Struct:
+ if !inlist {
+ delete(x.te, rtid)
+ x.line("z.EncFallback(" + varname + ")")
+ break
+ }
+ x.encStruct(varname, rtid, t)
+ default:
+ if rtidAdded {
+ delete(x.te, rtid)
+ }
+ x.line("z.EncFallback(" + varname + ")")
+ }
+}
+
+func (x *genRunner) encZero(t reflect.Type) {
+ switch t.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x.line("r.EncodeInt(0)")
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x.line("r.EncodeUint(0)")
+ case reflect.Float32:
+ x.line("r.EncodeFloat32(0)")
+ case reflect.Float64:
+ x.line("r.EncodeFloat64(0)")
+ case reflect.Bool:
+ x.line("r.EncodeBool(false)")
+ case reflect.String:
+ x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + `, "")`)
+ default:
+ x.line("r.EncodeNil()")
+ }
+}
+
+func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
+ // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. )
+ // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it
+
+ // if t === type currently running selfer on, do for all
+ ti := x.ti.get(rtid, t)
+ i := x.varsfx()
+ sepVarname := genTempVarPfx + "sep" + i
+ numfieldsvar := genTempVarPfx + "q" + i
+ ti2arrayvar := genTempVarPfx + "r" + i
+ struct2arrvar := genTempVarPfx + "2arr" + i
+
+ x.line(sepVarname + " := !z.EncBinary()")
+ x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar)
+ tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
+ // due to omitEmpty, we need to calculate the
+ // number of non-empty things we write out first.
+ // This is required as we need to pre-determine the size of the container,
+ // to support length-prefixing.
+ x.linef("var %s [%v]bool", numfieldsvar, len(tisfi))
+ x.linef("_, _, _ = %s, %s, %s", sepVarname, numfieldsvar, struct2arrvar)
+ x.linef("const %s bool = %v", ti2arrayvar, ti.toArray)
+ nn := 0
+ for j, si := range tisfi {
+ if !si.omitEmpty {
+ nn++
+ continue
+ }
+ var t2 reflect.StructField
+ var omitline string
+ if si.i != -1 {
+ t2 = t.Field(int(si.i))
+ } else {
+ t2typ := t
+ varname3 := varname
+ for _, ix := range si.is {
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(ix)
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ if t2typ.Kind() == reflect.Ptr {
+ omitline += varname3 + " != nil && "
+ }
+ }
+ }
+ // never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc.
+		// also, for maps/slices/arrays, check if len != 0 (not if == zero value)
+ switch t2.Type.Kind() {
+ case reflect.Struct:
+ omitline += " true"
+ case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan:
+ omitline += "len(" + varname + "." + t2.Name + ") != 0"
+ default:
+ omitline += varname + "." + t2.Name + " != " + x.genZeroValueR(t2.Type)
+ }
+ x.linef("%s[%v] = %s", numfieldsvar, j, omitline)
+ }
+ x.linef("var %snn%s int", genTempVarPfx, i)
+ x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
+ x.line("r.EncodeArrayStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")")
+ x.linef("} else {") // if not ti.toArray
+ x.linef("%snn%s = %v", genTempVarPfx, i, nn)
+ x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i)
+ x.linef("r.EncodeMapStart(%snn%s)", genTempVarPfx, i)
+ x.linef("%snn%s = %v", genTempVarPfx, i, 0)
+ // x.line("r.EncodeMapStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")")
+ x.line("}") // close if not StructToArray
+
+ for j, si := range tisfi {
+ i := x.varsfx()
+ isNilVarName := genTempVarPfx + "n" + i
+ var labelUsed bool
+ var t2 reflect.StructField
+ if si.i != -1 {
+ t2 = t.Field(int(si.i))
+ } else {
+ t2typ := t
+ varname3 := varname
+ for _, ix := range si.is {
+ // fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix)
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(ix)
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ if t2typ.Kind() == reflect.Ptr {
+ if !labelUsed {
+ x.line("var " + isNilVarName + " bool")
+ }
+ x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ")
+ x.line("goto LABEL" + i)
+ x.line("}")
+ labelUsed = true
+ // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }")
+ }
+ }
+ // t2 = t.FieldByIndex(si.is)
+ }
+ if labelUsed {
+ x.line("LABEL" + i + ":")
+ }
+ // if the type of the field is a Selfer, or one of the ones
+
+ x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray
+ if labelUsed {
+ x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
+ }
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ if si.omitEmpty {
+ x.linef("if %s[%v] {", numfieldsvar, j)
+ }
+ x.encVar(varname+"."+t2.Name, t2.Type)
+ if si.omitEmpty {
+ x.linef("} else {")
+ x.encZero(t2.Type)
+ x.linef("}")
+ }
+ if labelUsed {
+ x.line("}")
+ }
+
+ x.linef("} else {") // if not ti.toArray
+
+ if si.omitEmpty {
+ x.linef("if %s[%v] {", numfieldsvar, j)
+ }
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
+ x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
+ if labelUsed {
+ x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
+ x.encVar(varname+"."+t2.Name, t2.Type)
+ x.line("}")
+ } else {
+ x.encVar(varname+"."+t2.Name, t2.Type)
+ }
+ if si.omitEmpty {
+ x.line("}")
+ }
+ x.linef("} ") // end if/else ti.toArray
+ }
+ x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+ x.line("} else {")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+ x.line("}")
+
+}
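+
+// For illustration (the yy-prefixed names and the sizes are placeholders): for a
+// struct with three fields of which only Name and Tags carry omitempty, encStruct
+// first emits a bool bitmap plus a count, roughly:
+//
+//	var yyq2 [3]bool
+//	yyq2[1] = x.Name != ""
+//	yyq2[2] = len(x.Tags) != 0
+//	var yynn2 int
+//	if yyr2 || yy2arr2 {
+//		r.EncodeArrayStart(3)
+//	} else {
+//		yynn2 = 1
+//		for _, b := range yyq2 {
+//			if b {
+//				yynn2++
+//			}
+//		}
+//		r.EncodeMapStart(yynn2)
+//		yynn2 = 0
+//	}
+//
+// and then writes each field guarded by its bitmap entry.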
+
+func (x *genRunner) encListFallback(varname string, t reflect.Type) {
+ i := x.varsfx()
+ g := genTempVarPfx
+ x.line("r.EncodeArrayStart(len(" + varname + "))")
+ if t.Kind() == reflect.Chan {
+ x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, g, i, g, i, g, i)
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ x.linef("%sv%s := <-%s", g, i, varname)
+ } else {
+ // x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
+ x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname)
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ }
+ x.encVar(genTempVarPfx+"v"+i, t.Elem())
+ x.line("}")
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+}
+
+func (x *genRunner) encMapFallback(varname string, t reflect.Type) {
+ // TODO: expand this to handle canonical.
+ i := x.varsfx()
+ x.line("r.EncodeMapStart(len(" + varname + "))")
+ x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
+ // x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
+ x.encVar(genTempVarPfx+"k"+i, t.Key())
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
+ x.encVar(genTempVarPfx+"v"+i, t.Elem())
+ x.line("}")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+}
+
+func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) {
+	// We only check for a nil in the stream (TryDecodeAsNil) if the value is nillable.
+ // This removes some of the wasted checks for TryDecodeAsNil.
+ // We need to think about this more, to see what happens if omitempty, etc
+ // cause a nil value to be stored when something is expected.
+ // This could happen when decoding from a struct encoded as an array.
+	// For that, decVar should be called with canBeNil=true, to force true as its value.
+ i := x.varsfx()
+ if !canBeNil {
+ canBeNil = genAnythingCanBeNil || !genIsImmutable(t)
+ }
+ if canBeNil {
+ x.line("if r.TryDecodeAsNil() {")
+ if t.Kind() == reflect.Ptr {
+ x.line("if " + varname + " != nil { ")
+
+ // if varname is a field of a struct (has a dot in it),
+ // then just set it to nil
+ if strings.IndexByte(varname, '.') != -1 {
+ x.line(varname + " = nil")
+ } else {
+ x.line("*" + varname + " = " + x.genZeroValueR(t.Elem()))
+ }
+ x.line("}")
+ } else {
+ x.line(varname + " = " + x.genZeroValueR(t))
+ }
+ x.line("} else {")
+ } else {
+ x.line("// cannot be nil")
+ }
+ if t.Kind() != reflect.Ptr {
+ if x.decTryAssignPrimitive(varname, t) {
+ x.line(genTempVarPfx + "v" + i + " := &" + varname)
+ x.dec(genTempVarPfx+"v"+i, t)
+ }
+ } else {
+ x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem()))
+ // Ensure we set underlying ptr to a non-nil value (so we can deref to it later).
+ // There's a chance of a **T in here which is nil.
+ var ptrPfx string
+ for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() {
+ ptrPfx += "*"
+ x.linef("if %s%s == nil { %s%s = new(%s)}",
+ ptrPfx, varname, ptrPfx, varname, x.genTypeName(t))
+ }
+ // if varname has [ in it, then create temp variable for this ptr thingie
+ if strings.Index(varname, "[") >= 0 {
+ varname2 := genTempVarPfx + "w" + i
+ x.line(varname2 + " := " + varname)
+ varname = varname2
+ }
+
+ if ptrPfx == "" {
+ x.dec(varname, t)
+ } else {
+ x.line(genTempVarPfx + "z" + i + " := " + ptrPfx + varname)
+ x.dec(genTempVarPfx+"z"+i, t)
+ }
+
+ }
+
+ if canBeNil {
+ x.line("} ")
+ }
+}
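+
+// For illustration, decVar on a nillable pointer field x.F of type *T (both
+// placeholders) expands to roughly:
+//
+//	if r.TryDecodeAsNil() {
+//		if x.F != nil {
+//			x.F = nil
+//		}
+//	} else {
+//		if x.F == nil {
+//			x.F = new(T)
+//		}
+//		// ... then the pointed-to value is decoded, e.g. x.F.CodecDecodeSelf(d)
+//	}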
+
+// dec will decode a variable (varname) of type ptrTo(t).
+// t is always a basetype (i.e. not of kind reflect.Ptr).
+func (x *genRunner) dec(varname string, t reflect.Type) {
+ // assumptions:
+ // - the varname is to a pointer already. No need to take address of it
+ // - t is always a baseType T (not a *T, etc).
+ rtid := reflect.ValueOf(t).Pointer()
+ tptr := reflect.PtrTo(t)
+ if x.checkForSelfer(t, varname) {
+ if t.Implements(selferTyp) || tptr.Implements(selferTyp) {
+ x.line(varname + ".CodecDecodeSelf(d)")
+ return
+ }
+ if _, ok := x.td[rtid]; ok {
+ x.line(varname + ".CodecDecodeSelf(d)")
+ return
+ }
+ }
+
+ inlist := false
+ for _, t0 := range x.t {
+ if t == t0 {
+ inlist = true
+ if x.checkForSelfer(t, varname) {
+ x.line(varname + ".CodecDecodeSelf(d)")
+ return
+ }
+ break
+ }
+ }
+
+ var rtidAdded bool
+ if t == x.tc {
+ x.td[rtid] = true
+ rtidAdded = true
+ }
+
+ // check if
+ // - type is RawExt
+ // - the type implements (Text|JSON|Binary)(Unm|M)arshal
+ mi := x.varsfx()
+ x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi)
+ x.linef("_ = %sm%s", genTempVarPfx, mi)
+ x.line("if false {") //start if block
+ defer func() { x.line("}") }() //end if block
+
+ if t == rawExtTyp {
+ x.linef("} else { r.DecodeExt(%v, 0, nil)", varname)
+ return
+ }
+
+ // HACK: Support for Builtins.
+ // Currently, only Binc supports builtins, and the only builtin type is time.Time.
+ // Have a method that returns the rtid for time.Time if Handle is Binc.
+ if t == timeTyp {
+ vrtid := genTempVarPfx + "m" + x.varsfx()
+ x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid)
+ x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname)
+ }
+ // only check for extensions if the type is named, and has a packagePath.
+ if genImportPath(t) != "" && t.Name() != "" {
+		// first check if extensions are configured, before doing the interface conversion
+ x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname)
+ }
+
+ if t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) {
+ x.linef("} else if %sm%s { z.DecBinaryUnmarshal(%v) ", genTempVarPfx, mi, varname)
+ }
+ if t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) {
+ x.linef("} else if !%sm%s && z.IsJSONHandle() { z.DecJSONUnmarshal(%v)", genTempVarPfx, mi, varname)
+ } else if t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) {
+ x.linef("} else if !%sm%s { z.DecTextUnmarshal(%v)", genTempVarPfx, mi, varname)
+ }
+
+ x.line("} else {")
+
+ // Since these are pointers, we cannot share, and have to use them one by one
+ switch t.Kind() {
+ case reflect.Int:
+ x.line("*((*int)(" + varname + ")) = int(r.DecodeInt(codecSelferBitsize" + x.xs + "))")
+ // x.line("z.DecInt((*int)(" + varname + "))")
+ case reflect.Int8:
+ x.line("*((*int8)(" + varname + ")) = int8(r.DecodeInt(8))")
+ // x.line("z.DecInt8((*int8)(" + varname + "))")
+ case reflect.Int16:
+ x.line("*((*int16)(" + varname + ")) = int16(r.DecodeInt(16))")
+ // x.line("z.DecInt16((*int16)(" + varname + "))")
+ case reflect.Int32:
+ x.line("*((*int32)(" + varname + ")) = int32(r.DecodeInt(32))")
+ // x.line("z.DecInt32((*int32)(" + varname + "))")
+ case reflect.Int64:
+ x.line("*((*int64)(" + varname + ")) = int64(r.DecodeInt(64))")
+ // x.line("z.DecInt64((*int64)(" + varname + "))")
+
+ case reflect.Uint:
+ x.line("*((*uint)(" + varname + ")) = uint(r.DecodeUint(codecSelferBitsize" + x.xs + "))")
+ // x.line("z.DecUint((*uint)(" + varname + "))")
+ case reflect.Uint8:
+ x.line("*((*uint8)(" + varname + ")) = uint8(r.DecodeUint(8))")
+ // x.line("z.DecUint8((*uint8)(" + varname + "))")
+ case reflect.Uint16:
+ x.line("*((*uint16)(" + varname + ")) = uint16(r.DecodeUint(16))")
+ //x.line("z.DecUint16((*uint16)(" + varname + "))")
+ case reflect.Uint32:
+ x.line("*((*uint32)(" + varname + ")) = uint32(r.DecodeUint(32))")
+ //x.line("z.DecUint32((*uint32)(" + varname + "))")
+ case reflect.Uint64:
+ x.line("*((*uint64)(" + varname + ")) = uint64(r.DecodeUint(64))")
+ //x.line("z.DecUint64((*uint64)(" + varname + "))")
+ case reflect.Uintptr:
+ x.line("*((*uintptr)(" + varname + ")) = uintptr(r.DecodeUint(codecSelferBitsize" + x.xs + "))")
+
+ case reflect.Float32:
+ x.line("*((*float32)(" + varname + ")) = float32(r.DecodeFloat(true))")
+ //x.line("z.DecFloat32((*float32)(" + varname + "))")
+ case reflect.Float64:
+ x.line("*((*float64)(" + varname + ")) = float64(r.DecodeFloat(false))")
+ // x.line("z.DecFloat64((*float64)(" + varname + "))")
+
+ case reflect.Bool:
+ x.line("*((*bool)(" + varname + ")) = r.DecodeBool()")
+ // x.line("z.DecBool((*bool)(" + varname + "))")
+ case reflect.String:
+ x.line("*((*string)(" + varname + ")) = r.DecodeString()")
+ // x.line("z.DecString((*string)(" + varname + "))")
+ case reflect.Array, reflect.Chan:
+ x.xtraSM(varname, false, t)
+ // x.decListFallback(varname, rtid, true, t)
+ case reflect.Slice:
+ // if a []uint8, call dedicated function
+ // if a known fastpath slice, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ if rtid == uint8SliceTypId {
+ x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false, false)")
+ } else if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
+ } else {
+ x.xtraSM(varname, false, t)
+ // x.decListFallback(varname, rtid, false, t)
+ }
+ case reflect.Map:
+ // if a known fastpath map, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
+ } else {
+ x.xtraSM(varname, false, t)
+ // x.decMapFallback(varname, rtid, t)
+ }
+ case reflect.Struct:
+ if inlist {
+ x.decStruct(varname, rtid, t)
+ } else {
+ // delete(x.td, rtid)
+ x.line("z.DecFallback(" + varname + ", false)")
+ }
+ default:
+ if rtidAdded {
+ delete(x.te, rtid)
+ }
+ x.line("z.DecFallback(" + varname + ", true)")
+ }
+}
+
+func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) {
+ // We have to use the actual type name when doing a direct assignment.
+ // We don't have the luxury of casting the pointer to the underlying type.
+ //
+ // Consequently, in the situation of a
+ // type Message int32
+ // var x Message
+ // var i int32 = 32
+ // x = i // this will bomb
+ // x = Message(i) // this will work
+ // *((*int32)(&x)) = i // this will work
+ //
+ // Consequently, we replace:
+ // case reflect.Uint32: x.line(varname + " = uint32(r.DecodeUint(32))")
+ // with:
+ // case reflect.Uint32: x.line(varname + " = " + genTypeNamePrim(t, x.tc) + "(r.DecodeUint(32))")
+
+ xfn := func(t reflect.Type) string {
+ return x.genTypeNamePrim(t)
+ }
+ switch t.Kind() {
+ case reflect.Int:
+ x.linef("%s = %s(r.DecodeInt(codecSelferBitsize%s))", varname, xfn(t), x.xs)
+ case reflect.Int8:
+ x.linef("%s = %s(r.DecodeInt(8))", varname, xfn(t))
+ case reflect.Int16:
+ x.linef("%s = %s(r.DecodeInt(16))", varname, xfn(t))
+ case reflect.Int32:
+ x.linef("%s = %s(r.DecodeInt(32))", varname, xfn(t))
+ case reflect.Int64:
+ x.linef("%s = %s(r.DecodeInt(64))", varname, xfn(t))
+
+ case reflect.Uint:
+ x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs)
+ case reflect.Uint8:
+ x.linef("%s = %s(r.DecodeUint(8))", varname, xfn(t))
+ case reflect.Uint16:
+ x.linef("%s = %s(r.DecodeUint(16))", varname, xfn(t))
+ case reflect.Uint32:
+ x.linef("%s = %s(r.DecodeUint(32))", varname, xfn(t))
+ case reflect.Uint64:
+ x.linef("%s = %s(r.DecodeUint(64))", varname, xfn(t))
+ case reflect.Uintptr:
+ x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs)
+
+ case reflect.Float32:
+ x.linef("%s = %s(r.DecodeFloat(true))", varname, xfn(t))
+ case reflect.Float64:
+ x.linef("%s = %s(r.DecodeFloat(false))", varname, xfn(t))
+
+ case reflect.Bool:
+ x.linef("%s = %s(r.DecodeBool())", varname, xfn(t))
+ case reflect.String:
+ x.linef("%s = %s(r.DecodeString())", varname, xfn(t))
+ default:
+ tryAsPtr = true
+ }
+ return
+}
+
+func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) {
+ type tstruc struct {
+ TempVar string
+ Rand string
+ Varname string
+ CTyp string
+ Typ string
+ Immutable bool
+ Size int
+ }
+ telem := t.Elem()
+ ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())}
+
+ funcs := make(template.FuncMap)
+
+ funcs["decLineVar"] = func(varname string) string {
+ x.decVar(varname, telem, false)
+ return ""
+ }
+ funcs["decLine"] = func(pfx string) string {
+ x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false)
+ return ""
+ }
+ funcs["var"] = func(s string) string {
+ return ts.TempVar + s + ts.Rand
+ }
+ funcs["zero"] = func() string {
+ return x.genZeroValueR(telem)
+ }
+ funcs["isArray"] = func() bool {
+ return t.Kind() == reflect.Array
+ }
+ funcs["isSlice"] = func() bool {
+ return t.Kind() == reflect.Slice
+ }
+ funcs["isChan"] = func() bool {
+ return t.Kind() == reflect.Chan
+ }
+ tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl)
+ if err != nil {
+ panic(err)
+ }
+ if err = tm.Execute(x.w, &ts); err != nil {
+ panic(err)
+ }
+}
+
+func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) {
+ type tstruc struct {
+ TempVar string
+ Sfx string
+ Rand string
+ Varname string
+ KTyp string
+ Typ string
+ Size int
+ }
+ telem := t.Elem()
+ tkey := t.Key()
+ ts := tstruc{
+ genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey),
+ x.genTypeName(telem), int(telem.Size() + tkey.Size()),
+ }
+
+ funcs := make(template.FuncMap)
+ funcs["decElemZero"] = func() string {
+ return x.genZeroValueR(telem)
+ }
+ funcs["decElemKindImmutable"] = func() bool {
+ return genIsImmutable(telem)
+ }
+ funcs["decElemKindPtr"] = func() bool {
+ return telem.Kind() == reflect.Ptr
+ }
+ funcs["decElemKindIntf"] = func() bool {
+ return telem.Kind() == reflect.Interface
+ }
+ funcs["decLineVarK"] = func(varname string) string {
+ x.decVar(varname, tkey, false)
+ return ""
+ }
+ funcs["decLineVar"] = func(varname string) string {
+ x.decVar(varname, telem, false)
+ return ""
+ }
+ funcs["decLineK"] = func(pfx string) string {
+ x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false)
+ return ""
+ }
+ funcs["decLine"] = func(pfx string) string {
+ x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false)
+ return ""
+ }
+ funcs["var"] = func(s string) string {
+ return ts.TempVar + s + ts.Rand
+ }
+
+ tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl)
+ if err != nil {
+ panic(err)
+ }
+ if err = tm.Execute(x.w, &ts); err != nil {
+ panic(err)
+ }
+}
+
+func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) {
+ ti := x.ti.get(rtid, t)
+ tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
+ x.line("switch (" + kName + ") {")
+ for _, si := range tisfi {
+ x.line("case \"" + si.encName + "\":")
+ var t2 reflect.StructField
+ if si.i != -1 {
+ t2 = t.Field(int(si.i))
+ } else {
+			// we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
+ // t2 = t.FieldByIndex(si.is)
+ t2typ := t
+ varname3 := varname
+ for _, ix := range si.is {
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(ix)
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ if t2typ.Kind() == reflect.Ptr {
+ x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem()))
+ }
+ }
+ }
+ x.decVar(varname+"."+t2.Name, t2.Type, false)
+ }
+ x.line("default:")
+ // pass the slice here, so that the string will not escape, and maybe save allocation
+ x.line("z.DecStructFieldNotFound(-1, " + kName + ")")
+ x.line("} // end switch " + kName)
+}
+
+func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) {
+ tpfx := genTempVarPfx
+ i := x.varsfx()
+ kName := tpfx + "s" + i
+
+ // We thought to use ReadStringAsBytes, as go compiler might optimize the copy out.
+ // However, using that was more expensive, as it seems that the switch expression
+ // is evaluated each time.
+ //
+ // We could depend on decodeString using a temporary/shared buffer internally.
+ // However, this model of creating a byte array, and using explicitly is faster,
+ // and allows optional use of unsafe []byte->string conversion without alloc.
+
+ // Also, ensure that the slice array doesn't escape.
+ // That will help escape analysis prevent allocation when it gets better.
+
+ // x.line("var " + kName + "Arr = [32]byte{} // default string to decode into")
+ // x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into")
+ // use the scratch buffer to avoid allocation (most field names are < 32).
+
+ x.line("var " + kName + "Slc = z.DecScratchBuffer() // default slice to decode into")
+
+ x.line("_ = " + kName + "Slc")
+ switch style {
+ case genStructMapStyleLenPrefix:
+ x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i)
+ case genStructMapStyleCheckBreak:
+ x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i)
+ default: // 0, otherwise.
+ x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
+ x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i)
+ x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname)
+ x.line("} else { if r.CheckBreak() { break }; }")
+ }
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
+ x.line(kName + "Slc = r.DecodeBytes(" + kName + "Slc, true, true)")
+ // let string be scoped to this loop alone, so it doesn't escape.
+ if x.unsafe {
+ x.line(kName + "SlcHdr := codecSelferUnsafeString" + x.xs + "{uintptr(unsafe.Pointer(&" +
+ kName + "Slc[0])), len(" + kName + "Slc)}")
+ x.line(kName + " := *(*string)(unsafe.Pointer(&" + kName + "SlcHdr))")
+ } else {
+ x.line(kName + " := string(" + kName + "Slc)")
+ }
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
+ x.decStructMapSwitch(kName, varname, rtid, t)
+
+ x.line("} // end for " + tpfx + "j" + i)
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+}
+
+func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) {
+ tpfx := genTempVarPfx
+ i := x.varsfx()
+ ti := x.ti.get(rtid, t)
+ tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
+ x.linef("var %sj%s int", tpfx, i)
+ x.linef("var %sb%s bool", tpfx, i) // break
+ x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
+ for _, si := range tisfi {
+ var t2 reflect.StructField
+ if si.i != -1 {
+ t2 = t.Field(int(si.i))
+ } else {
+			// we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
+ // t2 = t.FieldByIndex(si.is)
+ t2typ := t
+ varname3 := varname
+ for _, ix := range si.is {
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(ix)
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ if t2typ.Kind() == reflect.Ptr {
+ x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem()))
+ }
+ }
+ }
+
+ x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
+ tpfx, i, tpfx, i, tpfx, i,
+ tpfx, i, lenvarname, tpfx, i)
+ x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }",
+ tpfx, i, x.xs, breakString)
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ x.decVar(varname+"."+t2.Name, t2.Type, true)
+ }
+ // read remaining values and throw away.
+ x.line("for {")
+ x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
+ tpfx, i, tpfx, i, tpfx, i,
+ tpfx, i, lenvarname, tpfx, i)
+ x.linef("if %sb%s { break }", tpfx, i)
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i)
+ x.line("}")
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+}
+
+func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
+ // if container is map
+ i := x.varsfx()
+ x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i)
+ x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs)
+ x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()")
+ x.linef("if %sl%s == 0 {", genTempVarPfx, i)
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+ if genUseOneFunctionForDecStructMap {
+ x.line("} else { ")
+ x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i)
+ } else {
+ x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ")
+ x.line("x.codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)")
+ x.line("} else {")
+ x.line("x.codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)")
+ }
+ x.line("}")
+
+ // else if container is array
+ x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs)
+ x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()")
+ x.linef("if %sl%s == 0 {", genTempVarPfx, i)
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+ x.line("} else { ")
+ x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i)
+ x.line("}")
+ // else panic
+ x.line("} else { ")
+ x.line("panic(codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + ")")
+ x.line("} ")
+}
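+
+// For illustration, for a slice type []MyType (a placeholder name) with no fastpath,
+// xtraSM emits calls of the form:
+//
+//	h.encSliceMyType(([]MyType)(x.F), e)
+//	h.decSliceMyType((*[]MyType)(yyv1), d)
+//
+// and registerXtraT queues []MyType so that the matching enc/dec helper methods are
+// generated later (see the loop over x.ts above).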
+
+// --------
+
+type genV struct {
+ // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice
+ MapKey string
+ Elem string
+ Primitive string
+ Size int
+}
+
+func (x *genRunner) newGenV(t reflect.Type) (v genV) {
+ switch t.Kind() {
+ case reflect.Slice, reflect.Array:
+ te := t.Elem()
+ v.Elem = x.genTypeName(te)
+ v.Size = int(te.Size())
+ case reflect.Map:
+ te, tk := t.Elem(), t.Key()
+ v.Elem = x.genTypeName(te)
+ v.MapKey = x.genTypeName(tk)
+ v.Size = int(te.Size() + tk.Size())
+ default:
+		panic("unexpected type for newGenV. Requires map, slice, or array type")
+ }
+ return
+}
+
+func (x *genV) MethodNamePfx(prefix string, prim bool) string {
+ var name []byte
+ if prefix != "" {
+ name = append(name, prefix...)
+ }
+ if prim {
+ name = append(name, genTitleCaseName(x.Primitive)...)
+ } else {
+ if x.MapKey == "" {
+ name = append(name, "Slice"...)
+ } else {
+ name = append(name, "Map"...)
+ name = append(name, genTitleCaseName(x.MapKey)...)
+ }
+ name = append(name, genTitleCaseName(x.Elem)...)
+ }
+ return string(name)
+
+}
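+
+// For illustration: a genV with MapKey "string" and Elem "int" yields
+// MethodNamePfx("Enc", false) == "EncMapStringInt", which callers turn into a
+// fastpath call such as z.F.EncMapStringIntV(v, false, e).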
+
+var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"
+
+// genImportPath returns the import path of a non-predeclared named type, or an empty string otherwise.
+//
+// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled,
+// where PkgPath returns the full path, including the vendoring prefix that should have been stripped.
+// We strip it here.
+func genImportPath(t reflect.Type) (s string) {
+ s = t.PkgPath()
+ if genCheckVendor {
+ // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later.
+ // if s contains /vendor/ OR startsWith vendor/, then return everything after it.
+ const vendorStart = "vendor/"
+ const vendorInline = "/vendor/"
+ if i := strings.LastIndex(s, vendorInline); i >= 0 {
+ s = s[i+len(vendorInline):]
+ } else if strings.HasPrefix(s, vendorStart) {
+ s = s[len(vendorStart):]
+ }
+ }
+ return
+}
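+
+// For example, with Go 1.5 vendoring enabled, a PkgPath such as
+// "example.com/app/vendor/github.com/ugorji/go/codec" (an illustrative path) is
+// reported here as "github.com/ugorji/go/codec".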
+
+// A go identifier is (letter|_)[letter|number|_]*
+func genGoIdentifier(s string, checkFirstChar bool) string {
+ b := make([]byte, 0, len(s))
+ t := make([]byte, 4)
+ var n int
+ for i, r := range s {
+ if checkFirstChar && i == 0 && !unicode.IsLetter(r) {
+ b = append(b, '_')
+ }
+ // r must be unicode_letter, unicode_digit or _
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ n = utf8.EncodeRune(t, r)
+ b = append(b, t[:n]...)
+ } else {
+ b = append(b, '_')
+ }
+ }
+ return string(b)
+}
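+
+// For example, genGoIdentifier("go-oidc", false) returns "go_oidc": every rune that
+// is not a letter or digit is replaced with '_'.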
+
+func genNonPtr(t reflect.Type) reflect.Type {
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return t
+}
+
+func genTitleCaseName(s string) string {
+ switch s {
+ case "interface{}":
+ return "Intf"
+ default:
+ return strings.ToUpper(s[0:1]) + s[1:]
+ }
+}
+
+func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) {
+ var ptrPfx string
+ for t.Kind() == reflect.Ptr {
+ ptrPfx += "Ptrto"
+ t = t.Elem()
+ }
+ tstr := t.String()
+ if tn := t.Name(); tn != "" {
+ if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+ return ptrPfx + tn
+ } else {
+ if genQNameRegex.MatchString(tstr) {
+ return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ }
+ }
+ switch t.Kind() {
+ case reflect.Map:
+ return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef)
+ case reflect.Slice:
+ return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef)
+ case reflect.Array:
+ return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef)
+ case reflect.Chan:
+ var cx string
+ switch t.ChanDir() {
+ case reflect.SendDir:
+ cx = "ChanSend"
+ case reflect.RecvDir:
+ cx = "ChanRecv"
+ default:
+ cx = "Chan"
+ }
+ return ptrPfx + cx + genMethodNameT(t.Elem(), tRef)
+ default:
+ if t == intfTyp {
+ return ptrPfx + "Interface"
+ } else {
+ if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+ if t.Name() != "" {
+ return ptrPfx + t.Name()
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ } else {
+ // best way to get the package name inclusive
+ // return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr))
+ if t.Name() != "" && genQNameRegex.MatchString(tstr) {
+ return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ }
+ }
+ }
+}
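+
+// For illustration, assuming MyType is a named type in the same package as tRef:
+// genMethodNameT yields "MyType" for MyType, "PtrtoMyType" for *MyType,
+// "SliceMyType" for []MyType, "Array4MyType" for [4]MyType, and
+// "MapMyTypeMyType" for map[MyType]MyType.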
+
+// genCustomTypeName base64-encodes the t.String() value in such a way
+// that it can be used within a function name.
+func genCustomTypeName(tstr string) string {
+ len2 := genBase64enc.EncodedLen(len(tstr))
+ bufx := make([]byte, len2)
+ genBase64enc.Encode(bufx, []byte(tstr))
+ for i := len2 - 1; i >= 0; i-- {
+ if bufx[i] == '=' {
+ len2--
+ } else {
+ break
+ }
+ }
+ return string(bufx[:len2])
+}
+
+func genIsImmutable(t reflect.Type) (v bool) {
+ return isImmutableKind(t.Kind())
+}
+
+type genInternal struct {
+ Values []genV
+ Unsafe bool
+}
+
+func (x genInternal) FastpathLen() (l int) {
+ for _, v := range x.Values {
+ if v.Primitive == "" {
+ l++
+ }
+ }
+ return
+}
+
+func genInternalZeroValue(s string) string {
+ switch s {
+ case "interface{}":
+ return "nil"
+ case "bool":
+ return "false"
+ case "string":
+ return `""`
+ default:
+ return "0"
+ }
+}
+
+func genInternalEncCommandAsString(s string, vname string) string {
+ switch s {
+ case "uint", "uint8", "uint16", "uint32", "uint64":
+ return "ee.EncodeUint(uint64(" + vname + "))"
+ case "int", "int8", "int16", "int32", "int64":
+ return "ee.EncodeInt(int64(" + vname + "))"
+ case "string":
+ return "ee.EncodeString(c_UTF8, " + vname + ")"
+ case "float32":
+ return "ee.EncodeFloat32(" + vname + ")"
+ case "float64":
+ return "ee.EncodeFloat64(" + vname + ")"
+ case "bool":
+ return "ee.EncodeBool(" + vname + ")"
+ case "symbol":
+ return "ee.EncodeSymbol(" + vname + ")"
+ default:
+ return "e.encode(" + vname + ")"
+ }
+}
+
+func genInternalDecCommandAsString(s string) string {
+ switch s {
+ case "uint":
+ return "uint(dd.DecodeUint(uintBitsize))"
+ case "uint8":
+ return "uint8(dd.DecodeUint(8))"
+ case "uint16":
+ return "uint16(dd.DecodeUint(16))"
+ case "uint32":
+ return "uint32(dd.DecodeUint(32))"
+ case "uint64":
+ return "dd.DecodeUint(64)"
+ case "uintptr":
+ return "uintptr(dd.DecodeUint(uintBitsize))"
+ case "int":
+	// if t == the type we are currently running the selfer on, do this for all fields
+ case "int8":
+ return "int8(dd.DecodeInt(8))"
+ case "int16":
+ return "int16(dd.DecodeInt(16))"
+ case "int32":
+ return "int32(dd.DecodeInt(32))"
+ case "int64":
+ return "dd.DecodeInt(64)"
+
+ case "string":
+ return "dd.DecodeString()"
+ case "float32":
+ return "float32(dd.DecodeFloat(true))"
+ case "float64":
+ return "dd.DecodeFloat(false)"
+ case "bool":
+ return "dd.DecodeBool()"
+ default:
+ panic(errors.New("gen internal: unknown type for decode: " + s))
+ }
+}
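+
+// For example, genInternalEncCommandAsString("int16", "v") returns
+// "ee.EncodeInt(int64(v))" and genInternalDecCommandAsString("float32") returns
+// "float32(dd.DecodeFloat(true))"; these strings are spliced into the fast-path
+// templates via the "encmd" and "decmd" template functions registered below.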
+
+func genInternalSortType(s string, elem bool) string {
+ for _, v := range [...]string{"int", "uint", "float", "bool", "string"} {
+ if strings.HasPrefix(s, v) {
+ if elem {
+ if v == "int" || v == "uint" || v == "float" {
+ return v + "64"
+ } else {
+ return v
+ }
+ }
+ return v + "Slice"
+ }
+ }
+ panic("sorttype: unexpected type: " + s)
+}
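+
+// For example, genInternalSortType("string", false) returns "stringSlice", while
+// genInternalSortType("uint64", true) and genInternalSortType("float32", true)
+// return the widened element types "uint64" and "float64" respectively.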
+
+// var genInternalMu sync.Mutex
+var genInternalV genInternal
+var genInternalTmplFuncs template.FuncMap
+var genInternalOnce sync.Once
+
+func genInternalInit() {
+ types := [...]string{
+ "interface{}",
+ "string",
+ "float32",
+ "float64",
+ "uint",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ "uintptr",
+ "int",
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "bool",
+ }
+ // keep as slice, so it is in specific iteration order.
+ // Initial order was uint64, string, interface{}, int, int64
+ mapvaltypes := [...]string{
+ "interface{}",
+ "string",
+ "uint",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ "uintptr",
+ "int",
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "float32",
+ "float64",
+ "bool",
+ }
+ wordSizeBytes := int(intBitsize) / 8
+
+ mapvaltypes2 := map[string]int{
+ "interface{}": 2 * wordSizeBytes,
+ "string": 2 * wordSizeBytes,
+ "uint": 1 * wordSizeBytes,
+ "uint8": 1,
+ "uint16": 2,
+ "uint32": 4,
+ "uint64": 8,
+ "uintptr": 1 * wordSizeBytes,
+ "int": 1 * wordSizeBytes,
+ "int8": 1,
+ "int16": 2,
+ "int32": 4,
+ "int64": 8,
+ "float32": 4,
+ "float64": 8,
+ "bool": 1,
+ }
+ var gt genInternal
+
+	// For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function
+ for _, s := range types {
+ gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]})
+ if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already.
+ gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]})
+ }
+ if _, ok := mapvaltypes2[s]; !ok {
+ gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]})
+ }
+ for _, ms := range mapvaltypes {
+ gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]})
+ }
+ }
+
+ funcs := make(template.FuncMap)
+ // funcs["haspfx"] = strings.HasPrefix
+ funcs["encmd"] = genInternalEncCommandAsString
+ funcs["decmd"] = genInternalDecCommandAsString
+ funcs["zerocmd"] = genInternalZeroValue
+ funcs["hasprefix"] = strings.HasPrefix
+ funcs["sorttype"] = genInternalSortType
+
+ genInternalV = gt
+ genInternalTmplFuncs = funcs
+}
+
+// genInternalGoFile is used to generate source files from templates.
+// It is run by the program author alone.
+// Unfortunately, it has to be exported so that it can be called from a command line tool.
+// *** DO NOT USE ***
+func genInternalGoFile(r io.Reader, w io.Writer, safe bool) (err error) {
+ genInternalOnce.Do(genInternalInit)
+
+ gt := genInternalV
+ gt.Unsafe = !safe
+
+ t := template.New("").Funcs(genInternalTmplFuncs)
+
+ tmplstr, err := ioutil.ReadAll(r)
+ if err != nil {
+ return
+ }
+
+ if t, err = t.Parse(string(tmplstr)); err != nil {
+ return
+ }
+
+ var out bytes.Buffer
+ err = t.Execute(&out, gt)
+ if err != nil {
+ return
+ }
+
+ bout, err := format.Source(out.Bytes())
+ if err != nil {
+ w.Write(out.Bytes()) // write out if error, so we can still see.
+ // w.Write(bout) // write out if error, as much as possible, so we can still see.
+ return
+ }
+ w.Write(bout)
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/helper.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper.go
new file mode 100644
index 0000000..40065a0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper.go
@@ -0,0 +1,1271 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// Contains code shared by both encode and decode.
+
+// Some shared ideas around encoding/decoding
+// ------------------------------------------
+//
+// If an interface{} is passed, we first do a type assertion to see if it is
+// a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
+//
+// If we start with a reflect.Value, we are already in reflect.Value land and
+// will try to grab the function for the underlying Type and directly call that function.
+// This is more performant than calling reflect.Value.Interface().
+//
+// This still helps us bypass many layers of reflection, and gives the best performance.
+//
+// Containers
+// ------------
+// Containers in the stream are either associative arrays (key-value pairs) or
+// regular arrays (indexed by incrementing integers).
+//
+// Some streams support indefinite-length containers, and use a breaking
+// byte-sequence to denote that the container has come to an end.
+//
+// Some streams also are text-based, and use explicit separators to denote the
+// end/beginning of different values.
+//
+// During encode, we use a high-level condition to determine how to iterate through
+// the container. That decision is based on whether the container is text-based (with
+// separators) or binary (without separators). If binary, we do not even call the
+// encoding of separators.
+//
+// During decode, we use a different high-level condition to determine how to iterate
+// through the containers. That decision is based on whether the stream contained
+// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that
+// it has to be binary, and we do not even try to read separators.
+//
+// The only codec that may suffer (slightly) is cbor, and only when decoding indefinite-length.
+// It may suffer because we treat it like a text-based codec, and read separators.
+// However, this read is a no-op and the cost is insignificant.
+//
+// Philosophy
+// ------------
+// On decode, this codec will update containers appropriately:
+// - If struct, update fields from stream into fields of struct.
+// If field in stream not found in struct, handle appropriately (based on option).
+// If a struct field has no corresponding value in the stream, leave it AS IS.
+// If nil in stream, set value to nil/zero value.
+// - If map, update map from stream.
+// If the stream value is NIL, set the map to nil.
+// - If slice, try to update up to the length of the array in the stream.
+//   If the container length is less than the stream array length,
+//   and the container cannot be expanded, handle appropriately (based on option).
+//   This means you can decode a 4-element stream array into a 1-element array.
+//
+// ------------------------------------
+// On encode, user can specify omitEmpty. This means that the value will be omitted
+// if it is the zero value. The problem may occur during decode, where omitted values do not affect
+// the value being decoded into. This means that if decoding into a struct with an
+// int field with current value=5, and the field is omitted in the stream, then after
+// decoding, the value will still be 5 (not 0).
+// omitEmpty only works if you guarantee that you always decode into zero-values.
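+//
+// For illustration (the type and values are only an example):
+//	type T struct{ A, B int }
+//	t := T{A: 1, B: 5}
+//	// if the stream was encoded from a value whose B was 0 (and so omitted),
+//	// then after decoding the stream into &t, t.B is still 5, not 0.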
+//
+// ------------------------------------
+// We could have truncated a map to remove keys not available in the stream,
+// or set values in the struct which are not in the stream to their zero values.
+// We decided against it because there is no efficient way to do it.
+// We may introduce it as an option later.
+// However, that will require enabling it for both runtime and code generation modes.
+//
+// To support truncate, we need to do 2 passes over the container:
+// map
+// - first collect all keys (e.g. in k1)
+// - for each key in the stream, mark in k1 that the key should not be removed
+// - after updating map, do second pass and call delete for all keys in k1 which are not marked
+// struct:
+// - for each field, track the *typeInfo s1
+// - iterate through all s1, and for each one not marked, set value to zero
+// - this involves checking the possible anonymous fields which are nil ptrs.
+// too much work.
+//
+// ------------------------------------------
+// Error Handling is done within the library using panic.
+//
+// This way, the code doesn't have to keep checking if an error has happened,
+// and we don't have to keep sending the error value along with each call
+// or storing it in the En|Decoder and checking it constantly along the way.
+//
+// The disadvantage is that small functions which use panics cannot be inlined.
+// The code accounts for that by only using panics behind an interface;
+// since interface calls cannot be inlined, this is irrelevant.
+//
+// We considered storing the error in the En|Decoder:
+// - once it has its err field set, it cannot be used again.
+// - panicking will be optional, controlled by a const flag.
+// - code should always check error first and return early.
+// We eventually decided against it as it makes the code clumsier to always
+// check for these error conditions.
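+//
+// A sketch of the resulting pattern (the exported Decode method actually lives in
+// decode.go; panicToErr is defined further down in this file):
+//
+//	func (d *Decoder) Decode(v interface{}) (err error) {
+//		defer panicToErr(&err) // convert a panic raised deeper in the call tree into err
+//		d.decode(v)            // internal helpers signal failure by panicking
+//		return
+//	}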
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ scratchByteArrayLen = 32
+ initCollectionCap = 32 // 32 is defensive. 16 is preferred.
+
+ // Support encoding.(Binary|Text)(Unm|M)arshaler.
+ // This constant flag will enable or disable it.
+ supportMarshalInterfaces = true
+
+ // Each Encoder or Decoder uses a cache of functions based on conditionals,
+ // so that the conditionals are not run every time.
+ //
+ // Either a map or a slice is used to keep track of the functions.
+ // The map is more natural, but has a higher cost than a slice/array.
+ // This flag (useMapForCodecCache) controls which is used.
+ //
+ // From benchmarks, slices with linear search perform better with < 32 entries.
+ // We have typically seen a high threshold of about 24 entries.
+ useMapForCodecCache = false
+
+ // for debugging, set this to false, to catch panic traces.
+ // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
+ recoverPanicToErr = true
+
+ // Fast path functions try to create a fast path encode or decode implementation
+ // for common maps and slices, by bypassing reflection altogether.
+ fastpathEnabled = true
+
+ // if checkStructForEmptyValue, check struct fields to see if they have an empty value.
+ // This could be an expensive call, so possibly disable it.
+ checkStructForEmptyValue = false
+
+ // if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue
+ derefForIsEmptyValue = false
+
+ // if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first.
+ // Only concern is that, if the slice already contained some garbage, we will decode into that garbage.
+ // The chances of this are slim, so leave this "optimization".
+ // TODO: should this be true, to ensure that we always decode into a "zero" "empty" value?
+ resetSliceElemToZeroValue bool = false
+)
+
+var (
+ oneByteArr = [1]byte{0}
+ zeroByteSlice = oneByteArr[:0:0]
+)
+
+type charEncoding uint8
+
+const (
+ c_RAW charEncoding = iota
+ c_UTF8
+ c_UTF16LE
+ c_UTF16BE
+ c_UTF32LE
+ c_UTF32BE
+)
+
+// valueType is the stream type
+type valueType uint8
+
+const (
+ valueTypeUnset valueType = iota
+ valueTypeNil
+ valueTypeInt
+ valueTypeUint
+ valueTypeFloat
+ valueTypeBool
+ valueTypeString
+ valueTypeSymbol
+ valueTypeBytes
+ valueTypeMap
+ valueTypeArray
+ valueTypeTimestamp
+ valueTypeExt
+
+ // valueTypeInvalid = 0xff
+)
+
+type seqType uint8
+
+const (
+ _ seqType = iota
+ seqTypeArray
+ seqTypeSlice
+ seqTypeChan
+)
+
+// note that containerMapStart and containerArrayStart are not sent.
+// This is because the ReadXXXStart and EncodeXXXStart methods already cover these.
+type containerState uint8
+
+const (
+ _ containerState = iota
+
+ containerMapStart // slot left open, since Driver method already covers it
+ containerMapKey
+ containerMapValue
+ containerMapEnd
+ containerArrayStart // slot left open, since Driver methods already cover it
+ containerArrayElem
+ containerArrayEnd
+)
+
+type rgetPoolT struct {
+ encNames [8]string
+ fNames [8]string
+ etypes [8]uintptr
+ sfis [8]*structFieldInfo
+}
+
+var rgetPool = sync.Pool{
+ New: func() interface{} { return new(rgetPoolT) },
+}
+
+type rgetT struct {
+ fNames []string
+ encNames []string
+ etypes []uintptr
+ sfis []*structFieldInfo
+}
+
+type containerStateRecv interface {
+ sendContainerState(containerState)
+}
+
+// mirror json.Marshaler and json.Unmarshaler here,
+// so we don't import the encoding/json package
+type jsonMarshaler interface {
+ MarshalJSON() ([]byte, error)
+}
+type jsonUnmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+var (
+ bigen = binary.BigEndian
+ structInfoFieldName = "_struct"
+
+ mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
+ mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
+ intfSliceTyp = reflect.TypeOf([]interface{}(nil))
+ intfTyp = intfSliceTyp.Elem()
+
+ stringTyp = reflect.TypeOf("")
+ timeTyp = reflect.TypeOf(time.Time{})
+ rawExtTyp = reflect.TypeOf(RawExt{})
+ uint8SliceTyp = reflect.TypeOf([]uint8(nil))
+
+ mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
+
+ binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
+ binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
+
+ textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+ textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+
+ jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
+ jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
+
+ selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem()
+
+ uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer()
+ rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer()
+ intfTypId = reflect.ValueOf(intfTyp).Pointer()
+ timeTypId = reflect.ValueOf(timeTyp).Pointer()
+ stringTypId = reflect.ValueOf(stringTyp).Pointer()
+
+ mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer()
+ mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer()
+ intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer()
+ // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer()
+
+ intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits())
+ uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits())
+
+ bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
+ bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+ chkOvf checkOverflow
+
+ noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo")
+)
+
+var defTypeInfos = NewTypeInfos([]string{"codec", "json"})
+
+// Selfer defines methods by which a value can encode or decode itself.
+//
+// Any type which implements Selfer will be able to encode or decode itself.
+// Consequently, during (en|de)code, this takes precedence over
+// (text|binary)(M|Unm)arshal or extension support.
+type Selfer interface {
+ CodecEncodeSelf(*Encoder)
+ CodecDecodeSelf(*Decoder)
+}
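+
+// A minimal illustration (a sketch; the celsius type is hypothetical, and it assumes
+// this package's Encoder.MustEncode and Decoder.MustDecode helpers):
+//
+//	type celsius struct{ V float64 }
+//
+//	func (c *celsius) CodecEncodeSelf(e *Encoder) { e.MustEncode(c.V) }
+//	func (c *celsius) CodecDecodeSelf(d *Decoder) { d.MustDecode(&c.V) }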
+
+// MapBySlice represents a slice which should be encoded as a map in the stream.
+// The slice contains a sequence of key-value pairs.
+// This affords storing a map in a specific sequence in the stream.
+//
+// The support of MapBySlice affords the following:
+// - A slice type which implements MapBySlice will be encoded as a map
+// - A slice can be decoded from a map in the stream
+type MapBySlice interface {
+ MapBySlice()
+}
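+
+// For example (a sketch; orderedKV is a hypothetical type):
+//
+//	type orderedKV []interface{} // laid out as [k1, v1, k2, v2, ...]
+//
+//	func (orderedKV) MapBySlice() {}
+//
+// Encoding an orderedKV value writes a map into the stream, preserving the order
+// of the key-value pairs in the slice.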
+
+// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
+//
+// BasicHandle encapsulates the common options and extension functions.
+type BasicHandle struct {
+ // TypeInfos is used to get the type info for any type.
+ //
+ // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
+ TypeInfos *TypeInfos
+
+ extHandle
+ EncodeOptions
+ DecodeOptions
+}
+
+func (x *BasicHandle) getBasicHandle() *BasicHandle {
+ return x
+}
+
+func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+ if x.TypeInfos != nil {
+ return x.TypeInfos.get(rtid, rt)
+ }
+ return defTypeInfos.get(rtid, rt)
+}
+
+// Handle is the interface for a specific encoding format.
+//
+// Typically, a Handle is pre-configured before first time use,
+// and not modified while in use. Such a pre-configured Handle
+// is safe for concurrent access.
+type Handle interface {
+ getBasicHandle() *BasicHandle
+ newEncDriver(w *Encoder) encDriver
+ newDecDriver(r *Decoder) decDriver
+ isBinary() bool
+}
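+
+// Typical usage (a sketch, using this package's NewEncoder/NewDecoder constructors;
+// w is an io.Writer and r is an io.Reader):
+//
+//	var jh JsonHandle // configure its options once, before first use
+//	enc := NewEncoder(w, &jh)
+//	dec := NewDecoder(r, &jh)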
+
+// RawExt represents raw unprocessed extension data.
+// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag.
+//
+// Only one of Data or Value is nil. If Data is nil, then the content of the RawExt is in the Value.
+type RawExt struct {
+ Tag uint64
+ // Data is the []byte which represents the raw ext. If Data is nil, ext is exposed in Value.
+ // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types
+ Data []byte
+ // Value represents the extension, if Data is nil.
+ // Value is used by codecs (e.g. cbor) which use the format to do custom serialization of the types.
+ Value interface{}
+}
+
+// BytesExt handles custom (de)serialization of types to/from []byte.
+// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
+type BytesExt interface {
+ // WriteExt converts a value to a []byte.
+ //
+ // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array.
+ WriteExt(v interface{}) []byte
+
+ // ReadExt updates a value from a []byte.
+ ReadExt(dst interface{}, src []byte)
+}
+
+// InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
+// The Encoder or Decoder will then handle the further (de)serialization of that known type.
+//
+// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types.
+type InterfaceExt interface {
+ // ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64.
+ //
+ // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array.
+ ConvertExt(v interface{}) interface{}
+
+ // UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time.
+ UpdateExt(dst interface{}, src interface{})
+}
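+
+// As an illustration, a sketch of an InterfaceExt that (de)serializes time.Time via
+// int64 unix seconds. It assumes the decoded value arrives back as an int64, which
+// may not hold for every format; error handling is elided:
+//
+//	type unixTimeExt struct{}
+//
+//	func (unixTimeExt) ConvertExt(v interface{}) interface{} {
+//		switch t := v.(type) {
+//		case time.Time:
+//			return t.Unix()
+//		case *time.Time:
+//			return t.Unix()
+//		}
+//		return nil
+//	}
+//
+//	func (unixTimeExt) UpdateExt(dst interface{}, src interface{}) {
+//		*(dst.(*time.Time)) = time.Unix(src.(int64), 0)
+//	}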
+
+// Ext handles custom (de)serialization of custom types / extensions.
+type Ext interface {
+ BytesExt
+ InterfaceExt
+}
+
+// addExtWrapper is a wrapper implementation to support former AddExt exported method.
+type addExtWrapper struct {
+ encFn func(reflect.Value) ([]byte, error)
+ decFn func(reflect.Value, []byte) error
+}
+
+func (x addExtWrapper) WriteExt(v interface{}) []byte {
+ bs, err := x.encFn(reflect.ValueOf(v))
+ if err != nil {
+ panic(err)
+ }
+ return bs
+}
+
+func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
+ if err := x.decFn(reflect.ValueOf(v), bs); err != nil {
+ panic(err)
+ }
+}
+
+func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
+ return x.WriteExt(v)
+}
+
+func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
+ x.ReadExt(dest, v.([]byte))
+}
+
+type setExtWrapper struct {
+ b BytesExt
+ i InterfaceExt
+}
+
+func (x *setExtWrapper) WriteExt(v interface{}) []byte {
+ if x.b == nil {
+ panic("BytesExt.WriteExt is not supported")
+ }
+ return x.b.WriteExt(v)
+}
+
+func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) {
+ if x.b == nil {
+ panic("BytesExt.ReadExt is not supported")
+ }
+ x.b.ReadExt(v, bs)
+}
+
+func (x *setExtWrapper) ConvertExt(v interface{}) interface{} {
+ if x.i == nil {
+ panic("InterfaceExt.ConvertExt is not supported")
+ }
+ return x.i.ConvertExt(v)
+}
+
+func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) {
+ if x.i == nil {
+ panic("InterfaceExt.UpdateExt is not supported")
+ }
+ x.i.UpdateExt(dest, v)
+}
+
+// type errorString string
+// func (x errorString) Error() string { return string(x) }
+
+type binaryEncodingType struct{}
+
+func (_ binaryEncodingType) isBinary() bool { return true }
+
+type textEncodingType struct{}
+
+func (_ textEncodingType) isBinary() bool { return false }
+
+// noBuiltInTypes is embedded into many types which do not support builtins
+// e.g. msgpack, simple, cbor.
+type noBuiltInTypes struct{}
+
+func (_ noBuiltInTypes) IsBuiltinType(rt uintptr) bool { return false }
+func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
+
+type noStreamingCodec struct{}
+
+func (_ noStreamingCodec) CheckBreak() bool { return false }
+
+// bigenHelper.
+// Callers must already have sliced x to exactly the required length, because we will not reslice.
+type bigenHelper struct {
+ x []byte // must be correctly sliced to appropriate len. slicing is a cost.
+ w encWriter
+}
+
+func (z bigenHelper) writeUint16(v uint16) {
+ bigen.PutUint16(z.x, v)
+ z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint32(v uint32) {
+ bigen.PutUint32(z.x, v)
+ z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint64(v uint64) {
+ bigen.PutUint64(z.x, v)
+ z.w.writeb(z.x)
+}
+
+type extTypeTagFn struct {
+ rtid uintptr
+ rt reflect.Type
+ tag uint64
+ ext Ext
+}
+
+type extHandle []extTypeTagFn
+
+// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+//
+// AddExt registers an encode and decode function for a reflect.Type.
+// AddExt internally calls SetExt.
+// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
+func (o *extHandle) AddExt(
+ rt reflect.Type, tag byte,
+ encfn func(reflect.Value) ([]byte, error), decfn func(reflect.Value, []byte) error,
+) (err error) {
+ if encfn == nil || decfn == nil {
+ return o.SetExt(rt, uint64(tag), nil)
+ }
+ return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
+}
+
+// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+//
+// Note that the type must be a named type, and specifically not
+// a pointer or interface. An error is returned if that is not honored.
+//
+// To deregister an Ext, call SetExt with a nil Ext.
+func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
+ // o is a pointer, because we may need to initialize it
+ if rt.PkgPath() == "" || rt.Kind() == reflect.Interface {
+ err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T",
+ reflect.Zero(rt).Interface())
+ return
+ }
+
+ rtid := reflect.ValueOf(rt).Pointer()
+ for _, v := range *o {
+ if v.rtid == rtid {
+ v.tag, v.ext = tag, ext
+ return
+ }
+ }
+
+ if *o == nil {
+ *o = make([]extTypeTagFn, 0, 4)
+ }
+ *o = append(*o, extTypeTagFn{rtid, rt, tag, ext})
+ return
+}
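+
+// Example registration (a sketch; myType and myTypeExt are hypothetical, and it assumes
+// the SetBytesExt method referenced in the deprecation notes above, e.g. on MsgpackHandle):
+//
+//	var mh MsgpackHandle
+//	err := mh.SetBytesExt(reflect.TypeOf(myType{}), 78, myTypeExt{})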
+
+func (o extHandle) getExt(rtid uintptr) *extTypeTagFn {
+ var v *extTypeTagFn
+ for i := range o {
+ v = &o[i]
+ if v.rtid == rtid {
+ return v
+ }
+ }
+ return nil
+}
+
+func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn {
+ var v *extTypeTagFn
+ for i := range o {
+ v = &o[i]
+ if v.tag == tag {
+ return v
+ }
+ }
+ return nil
+}
+
+type structFieldInfo struct {
+ encName string // encode name
+
+ // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set.
+
+ is []int // (recursive/embedded) field index in struct
+ i int16 // field index in struct
+ omitEmpty bool
+ toArray bool // if field is _struct, is the toArray set?
+}
+
+// func (si *structFieldInfo) isZero() bool {
+// return si.encName == "" && len(si.is) == 0 && si.i == 0 && !si.omitEmpty && !si.toArray
+// }
+
+// field returns the field of the struct.
+// If the field lies behind nil anonymous (embedded) pointers and update is false, it returns an invalid reflect.Value.
+func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value) {
+ if si.i != -1 {
+ v = v.Field(int(si.i))
+ return v
+ }
+ // replicate FieldByIndex
+ for _, x := range si.is {
+ for v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ if !update {
+ return
+ }
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ v = v.Field(x)
+ }
+ return v
+}
+
+func (si *structFieldInfo) setToZeroValue(v reflect.Value) {
+ if si.i != -1 {
+ v = v.Field(int(si.i))
+ v.Set(reflect.Zero(v.Type()))
+ // v.Set(reflect.New(v.Type()).Elem())
+ // v.Set(reflect.New(v.Type()))
+ } else {
+ // replicate FieldByIndex
+ for _, x := range si.is {
+ for v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return
+ }
+ v = v.Elem()
+ }
+ v = v.Field(x)
+ }
+ v.Set(reflect.Zero(v.Type()))
+ }
+}
+
+func parseStructFieldInfo(fname string, stag string) *structFieldInfo {
+ // if fname == "" {
+ // panic(noFieldNameToStructFieldInfoErr)
+ // }
+ si := structFieldInfo{
+ encName: fname,
+ }
+
+ if stag != "" {
+ for i, s := range strings.Split(stag, ",") {
+ if i == 0 {
+ if s != "" {
+ si.encName = s
+ }
+ } else {
+ if s == "omitempty" {
+ si.omitEmpty = true
+ } else if s == "toarray" {
+ si.toArray = true
+ }
+ }
+ }
+ }
+ // si.encNameBs = []byte(si.encName)
+ return &si
+}
+
+type sfiSortedByEncName []*structFieldInfo
+
+func (p sfiSortedByEncName) Len() int {
+ return len(p)
+}
+
+func (p sfiSortedByEncName) Less(i, j int) bool {
+ return p[i].encName < p[j].encName
+}
+
+func (p sfiSortedByEncName) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+// typeInfo keeps information about each type referenced in the encode/decode sequence.
+//
+// During an encode/decode sequence, we work as below:
+// - If base is a built in type, en/decode base value
+// - If base is registered as an extension, en/decode base value
+// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
+// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
+// - Else decode appropriately based on the reflect.Kind
+type typeInfo struct {
+ sfi []*structFieldInfo // sorted. Used when enc/dec struct to map.
+ sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array.
+
+ rt reflect.Type
+ rtid uintptr
+
+ numMeth uint16 // number of methods
+
+ // baseId identifies the base reflect.Type, obtained after dereferencing
+ // the pointers. E.g. base type of ***time.Time is time.Time.
+ base reflect.Type
+ baseId uintptr
+ baseIndir int8 // number of indirections to get to base
+
+ mbs bool // base type (T or *T) is a MapBySlice
+
+ bm bool // base type (T or *T) is a binaryMarshaler
+ bunm bool // base type (T or *T) is a binaryUnmarshaler
+ bmIndir int8 // number of indirections to get to binaryMarshaler type
+ bunmIndir int8 // number of indirections to get to binaryUnmarshaler type
+
+ tm bool // base type (T or *T) is a textMarshaler
+ tunm bool // base type (T or *T) is a textUnmarshaler
+ tmIndir int8 // number of indirections to get to textMarshaler type
+ tunmIndir int8 // number of indirections to get to textUnmarshaler type
+
+ jm bool // base type (T or *T) is a jsonMarshaler
+ junm bool // base type (T or *T) is a jsonUnmarshaler
+ jmIndir int8 // number of indirections to get to jsonMarshaler type
+ junmIndir int8 // number of indirections to get to jsonUnmarshaler type
+
+ cs bool // base type (T or *T) is a Selfer
+ csIndir int8 // number of indirections to get to Selfer type
+
+ toArray bool // whether this (struct) type should be encoded as an array
+}
+
+func (ti *typeInfo) indexForEncName(name string) int {
+ //tisfi := ti.sfi
+ const binarySearchThreshold = 16
+ if sfilen := len(ti.sfi); sfilen < binarySearchThreshold {
+ // linear search. faster than binary search in my testing up to 16-field structs.
+ for i, si := range ti.sfi {
+ if si.encName == name {
+ return i
+ }
+ }
+ } else {
+ // binary search. adapted from sort/search.go.
+ h, i, j := 0, 0, sfilen
+ for i < j {
+ h = i + (j-i)/2
+ if ti.sfi[h].encName < name {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ if i < sfilen && ti.sfi[i].encName == name {
+ return i
+ }
+ }
+ return -1
+}
+
+// TypeInfos caches typeInfo for each type on first inspection.
+//
+// It is configured with a set of tag keys, which are used to get
+// configuration for the type.
+type TypeInfos struct {
+ infos map[uintptr]*typeInfo
+ mu sync.RWMutex
+ tags []string
+}
+
+// NewTypeInfos creates a TypeInfos given a set of struct tags keys.
+//
+// This allows users to customize the struct tag keys which contain configuration
+// of their types.
+func NewTypeInfos(tags []string) *TypeInfos {
+ return &TypeInfos{tags: tags, infos: make(map[uintptr]*typeInfo, 64)}
+}
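+
+// For example, to also honor a hypothetical "db" struct tag key (a sketch; the
+// result is then plugged into a handle via its BasicHandle.TypeInfos field):
+//
+//	var jh JsonHandle
+//	jh.TypeInfos = NewTypeInfos([]string{"codec", "json", "db"})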
+
+func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
+ // check for tags: codec, json, in that order.
+ // this allows seamless support for many configured structs.
+ for _, x := range x.tags {
+ s = t.Get(x)
+ if s != "" {
+ return s
+ }
+ }
+ return
+}
+
+func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+ var ok bool
+ x.mu.RLock()
+ pti, ok = x.infos[rtid]
+ x.mu.RUnlock()
+ if ok {
+ return
+ }
+
+ // do not hold lock while computing this.
+ // it may lead to duplication, but that's ok.
+ ti := typeInfo{rt: rt, rtid: rtid}
+ ti.numMeth = uint16(rt.NumMethod())
+
+ var indir int8
+ if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok {
+ ti.bm, ti.bmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok {
+ ti.bunm, ti.bunmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, textMarshalerTyp); ok {
+ ti.tm, ti.tmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok {
+ ti.tunm, ti.tunmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok {
+ ti.jm, ti.jmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok {
+ ti.junm, ti.junmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, selferTyp); ok {
+ ti.cs, ti.csIndir = true, indir
+ }
+ if ok, _ = implementsIntf(rt, mapBySliceTyp); ok {
+ ti.mbs = true
+ }
+
+ pt := rt
+ var ptIndir int8
+ // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { }
+ for pt.Kind() == reflect.Ptr {
+ pt = pt.Elem()
+ ptIndir++
+ }
+ if ptIndir == 0 {
+ ti.base = rt
+ ti.baseId = rtid
+ } else {
+ ti.base = pt
+ ti.baseId = reflect.ValueOf(pt).Pointer()
+ ti.baseIndir = ptIndir
+ }
+
+ if rt.Kind() == reflect.Struct {
+ var siInfo *structFieldInfo
+ if f, ok := rt.FieldByName(structInfoFieldName); ok {
+ siInfo = parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag))
+ ti.toArray = siInfo.toArray
+ }
+ pi := rgetPool.Get()
+ pv := pi.(*rgetPoolT)
+ pv.etypes[0] = ti.baseId
+ vv := rgetT{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
+ x.rget(rt, rtid, nil, &vv, siInfo)
+ ti.sfip = make([]*structFieldInfo, len(vv.sfis))
+ ti.sfi = make([]*structFieldInfo, len(vv.sfis))
+ copy(ti.sfip, vv.sfis)
+ sort.Sort(sfiSortedByEncName(vv.sfis))
+ copy(ti.sfi, vv.sfis)
+ rgetPool.Put(pi)
+ }
+ // sfi = sfip
+
+ x.mu.Lock()
+ if pti, ok = x.infos[rtid]; !ok {
+ pti = &ti
+ x.infos[rtid] = pti
+ }
+ x.mu.Unlock()
+ return
+}
+
+func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr,
+ indexstack []int, pv *rgetT, siInfo *structFieldInfo,
+) {
+ // This will read up the fields and store how to access the value.
+ // It uses the go language's rules for embedding, as below:
+ // - if a field has been seen while traversing, skip it
+ // - if an encName has been seen while traversing, skip it
+ // - if an embedded type has been seen, skip it
+ //
+ // Also, per Go's rules, embedded fields must be analyzed AFTER all top-level fields.
+ //
+ // Note: we consciously use slices, not a map, to simulate a set.
+ // Typically, types have < 16 fields, and iteration using equals is faster than maps there
+
+ type anonField struct {
+ ft reflect.Type
+ idx int
+ }
+
+ var anonFields []anonField
+
+LOOP:
+ for j, jlen := 0, rt.NumField(); j < jlen; j++ {
+ f := rt.Field(j)
+ fkind := f.Type.Kind()
+ // skip if a func type, or is unexported, or structTag value == "-"
+ switch fkind {
+ case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer:
+ continue LOOP
+ }
+
+ // if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) {
+ if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded
+ continue
+ }
+ stag := x.structTag(f.Tag)
+ if stag == "-" {
+ continue
+ }
+ var si *structFieldInfo
+ // if anonymous and no struct tag (or it's blank), and a struct (or pointer to struct), inline it.
+ if f.Anonymous && fkind != reflect.Interface {
+ doInline := stag == ""
+ if !doInline {
+ si = parseStructFieldInfo("", stag)
+ doInline = si.encName == ""
+ // doInline = si.isZero()
+ }
+ if doInline {
+ ft := f.Type
+ for ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+ if ft.Kind() == reflect.Struct {
+ // handle anonymous fields after handling all the non-anon fields
+ anonFields = append(anonFields, anonField{ft, j})
+ continue
+ }
+ }
+ }
+
+ // after the anonymous dance: if an unexported field, skip
+ if f.PkgPath != "" { // unexported
+ continue
+ }
+
+ if f.Name == "" {
+ panic(noFieldNameToStructFieldInfoErr)
+ }
+
+ for _, k := range pv.fNames {
+ if k == f.Name {
+ continue LOOP
+ }
+ }
+ pv.fNames = append(pv.fNames, f.Name)
+
+ if si == nil {
+ si = parseStructFieldInfo(f.Name, stag)
+ } else if si.encName == "" {
+ si.encName = f.Name
+ }
+
+ for _, k := range pv.encNames {
+ if k == si.encName {
+ continue LOOP
+ }
+ }
+ pv.encNames = append(pv.encNames, si.encName)
+
+ // si.ikind = int(f.Type.Kind())
+ if len(indexstack) == 0 {
+ si.i = int16(j)
+ } else {
+ si.i = -1
+ si.is = make([]int, len(indexstack)+1)
+ copy(si.is, indexstack)
+ si.is[len(indexstack)] = j
+ // si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
+ }
+
+ if siInfo != nil {
+ if siInfo.omitEmpty {
+ si.omitEmpty = true
+ }
+ }
+ pv.sfis = append(pv.sfis, si)
+ }
+
+ // now handle anonymous fields
+LOOP2:
+ for _, af := range anonFields {
+ // if etypes contains this, then do not call rget again (as the fields are already seen here)
+ ftid := reflect.ValueOf(af.ft).Pointer()
+ for _, k := range pv.etypes {
+ if k == ftid {
+ continue LOOP2
+ }
+ }
+ pv.etypes = append(pv.etypes, ftid)
+
+ indexstack2 := make([]int, len(indexstack)+1)
+ copy(indexstack2, indexstack)
+ indexstack2[len(indexstack)] = af.idx
+ // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
+ x.rget(af.ft, ftid, indexstack2, pv, siInfo)
+ }
+}
+
+func panicToErr(err *error) {
+ if recoverPanicToErr {
+ if x := recover(); x != nil {
+ //debug.PrintStack()
+ panicValToErr(x, err)
+ }
+ }
+}
+
+// func doPanic(tag string, format string, params ...interface{}) {
+// params2 := make([]interface{}, len(params)+1)
+// params2[0] = tag
+// copy(params2[1:], params)
+// panic(fmt.Errorf("%s: "+format, params2...))
+// }
+
+func isImmutableKind(k reflect.Kind) (v bool) {
+ return false ||
+ k == reflect.Int ||
+ k == reflect.Int8 ||
+ k == reflect.Int16 ||
+ k == reflect.Int32 ||
+ k == reflect.Int64 ||
+ k == reflect.Uint ||
+ k == reflect.Uint8 ||
+ k == reflect.Uint16 ||
+ k == reflect.Uint32 ||
+ k == reflect.Uint64 ||
+ k == reflect.Uintptr ||
+ k == reflect.Float32 ||
+ k == reflect.Float64 ||
+ k == reflect.Bool ||
+ k == reflect.String
+}
+
+// these functions must be inlinable, and not call anybody
+type checkOverflow struct{}
+
+func (_ checkOverflow) Float32(f float64) (overflow bool) {
+ if f < 0 {
+ f = -f
+ }
+ return math.MaxFloat32 < f && f <= math.MaxFloat64
+}
+
+func (_ checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
+ if bitsize == 0 || bitsize >= 64 || v == 0 {
+ return
+ }
+ if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
+ overflow = true
+ }
+ return
+}
+
+func (_ checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
+ if bitsize == 0 || bitsize >= 64 || v == 0 {
+ return
+ }
+ if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
+ overflow = true
+ }
+ return
+}
+
+func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) {
+ // e.g. -128 to 127 for int8
+ pos := (v >> 63) == 0
+ ui2 := v & 0x7fffffffffffffff
+ if pos {
+ if ui2 > math.MaxInt64 {
+ overflow = true
+ return
+ }
+ } else {
+ if ui2 > math.MaxInt64-1 {
+ overflow = true
+ return
+ }
+ }
+ i = int64(v)
+ return
+}
+
+// ------------------ SORT -----------------
+
+func isNaN(f float64) bool { return f != f }
+
+// -----------------------
+
+type intSlice []int64
+type uintSlice []uint64
+type floatSlice []float64
+type boolSlice []bool
+type stringSlice []string
+type bytesSlice [][]byte
+
+func (p intSlice) Len() int { return len(p) }
+func (p intSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p intSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p uintSlice) Len() int { return len(p) }
+func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p floatSlice) Len() int { return len(p) }
+func (p floatSlice) Less(i, j int) bool {
+ return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j])
+}
+func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p stringSlice) Len() int { return len(p) }
+func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p stringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p bytesSlice) Len() int { return len(p) }
+func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 }
+func (p bytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p boolSlice) Len() int { return len(p) }
+func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] }
+func (p boolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// ---------------------
+
+type intRv struct {
+ v int64
+ r reflect.Value
+}
+type intRvSlice []intRv
+type uintRv struct {
+ v uint64
+ r reflect.Value
+}
+type uintRvSlice []uintRv
+type floatRv struct {
+ v float64
+ r reflect.Value
+}
+type floatRvSlice []floatRv
+type boolRv struct {
+ v bool
+ r reflect.Value
+}
+type boolRvSlice []boolRv
+type stringRv struct {
+ v string
+ r reflect.Value
+}
+type stringRvSlice []stringRv
+type bytesRv struct {
+ v []byte
+ r reflect.Value
+}
+type bytesRvSlice []bytesRv
+
+func (p intRvSlice) Len() int { return len(p) }
+func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p uintRvSlice) Len() int { return len(p) }
+func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p floatRvSlice) Len() int { return len(p) }
+func (p floatRvSlice) Less(i, j int) bool {
+ return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v)
+}
+func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p stringRvSlice) Len() int { return len(p) }
+func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p bytesRvSlice) Len() int { return len(p) }
+func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
+func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p boolRvSlice) Len() int { return len(p) }
+func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v }
+func (p boolRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// -----------------
+
+type bytesI struct {
+ v []byte
+ i interface{}
+}
+
+type bytesISlice []bytesI
+
+func (p bytesISlice) Len() int { return len(p) }
+func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
+func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// -----------------
+
+type set []uintptr
+
+func (s *set) add(v uintptr) (exists bool) {
+ // e.ci is always nil, or len >= 1
+ // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Add: %v, exists: %v\n", v, exists) }()
+ x := *s
+ if x == nil {
+ x = make([]uintptr, 1, 8)
+ x[0] = v
+ *s = x
+ return
+ }
+ // typically, length will be 1. make this perform.
+ if len(x) == 1 {
+ if j := x[0]; j == 0 {
+ x[0] = v
+ } else if j == v {
+ exists = true
+ } else {
+ x = append(x, v)
+ *s = x
+ }
+ return
+ }
+ // check if it exists
+ for _, j := range x {
+ if j == v {
+ exists = true
+ return
+ }
+ }
+ // try to replace a "deleted" slot
+ for i, j := range x {
+ if j == 0 {
+ x[i] = v
+ return
+ }
+ }
+ // if unable to replace deleted slot, just append it.
+ x = append(x, v)
+ *s = x
+ return
+}
+
+func (s *set) remove(v uintptr) (exists bool) {
+ // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Rm: %v, exists: %v\n", v, exists) }()
+ x := *s
+ if len(x) == 0 {
+ return
+ }
+ if len(x) == 1 {
+ if x[0] == v {
+ x[0] = 0
+ }
+ return
+ }
+ for i, j := range x {
+ if j == v {
+ exists = true
+ x[i] = 0 // set it to 0, as a way to delete it.
+ // copy(x[i:], x[i+1:])
+ // x = x[:len(x)-1]
+ return
+ }
+ }
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_internal.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_internal.go
new file mode 100644
index 0000000..dea981f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_internal.go
@@ -0,0 +1,242 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// All non-std package dependencies live in this file,
+// so porting to a different environment is easy (just update the functions).
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+)
+
+func panicValToErr(panicVal interface{}, err *error) {
+ if panicVal == nil {
+ return
+ }
+ // case nil
+ switch xerr := panicVal.(type) {
+ case error:
+ *err = xerr
+ case string:
+ *err = errors.New(xerr)
+ default:
+ *err = fmt.Errorf("%v", panicVal)
+ }
+ return
+}
+
+func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
+ switch v.Kind() {
+ case reflect.Invalid:
+ return true
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ if deref {
+ if v.IsNil() {
+ return true
+ }
+ return hIsEmptyValue(v.Elem(), deref, checkStruct)
+ } else {
+ return v.IsNil()
+ }
+ case reflect.Struct:
+ if !checkStruct {
+ return false
+ }
+ // return true if all fields are empty. else return false.
+ // we cannot use equality check, because some fields may be maps/slices/etc
+ // and consequently the structs are not comparable.
+ // return v.Interface() == reflect.Zero(v.Type()).Interface()
+ for i, n := 0, v.NumField(); i < n; i++ {
+ if !hIsEmptyValue(v.Field(i), deref, checkStruct) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue)
+}
+
+func pruneSignExt(v []byte, pos bool) (n int) {
+ if len(v) < 2 {
+ } else if pos && v[0] == 0 {
+ for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ {
+ }
+ } else if !pos && v[0] == 0xff {
+ for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ {
+ }
+ }
+ return
+}
+
+func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) {
+ if typ == nil {
+ return
+ }
+ rt := typ
+ // The type might be a pointer and we need to keep
+ // dereferencing to the base type until we find an implementation.
+ for {
+ if rt.Implements(iTyp) {
+ return true, indir
+ }
+ if p := rt; p.Kind() == reflect.Ptr {
+ indir++
+ if indir >= math.MaxInt8 { // insane number of indirections
+ return false, 0
+ }
+ rt = p.Elem()
+ continue
+ }
+ break
+ }
+ // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
+ if typ.Kind() != reflect.Ptr {
+ // Not a pointer, but does the pointer work?
+ if reflect.PtrTo(typ).Implements(iTyp) {
+ return true, -1
+ }
+ }
+ return false, 0
+}
+
+// validate that this function is correct ...
+// culled from OGRE (Object-Oriented Graphics Rendering Engine)
+// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html)
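+//
+// For example, the half-precision bits 0x3C00 (the value 1.0) map to the float32
+// bits 0x3F800000: s=0, m=0, and e=15 re-biases to 127.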
+func halfFloatToFloatBits(yy uint16) (d uint32) {
+ y := uint32(yy)
+ s := (y >> 15) & 0x01
+ e := (y >> 10) & 0x1f
+ m := y & 0x03ff
+
+ if e == 0 {
+ if m == 0 { // plus or minus 0
+ return s << 31
+ } else { // Denormalized number -- renormalize it
+ for (m & 0x00000400) == 0 {
+ m <<= 1
+ e -= 1
+ }
+ e += 1
+ const zz uint32 = 0x0400
+ m &= ^zz
+ }
+ } else if e == 31 {
+ if m == 0 { // Inf
+ return (s << 31) | 0x7f800000
+ } else { // NaN
+ return (s << 31) | 0x7f800000 | (m << 13)
+ }
+ }
+ e = e + (127 - 15)
+ m = m << 13
+ return (s << 31) | (e << 23) | m
+}
+
+// growCap returns a new capacity for a slice, given the following:
+// - oldCap: current capacity
+// - unit: in-memory size of an element
+// - num: number of elements to add
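+//
+// For example (worked by hand from the thresholds below): growCap(100, 1, 10)
+// doubles 100 to 200, adds num to get 210, then rounds up to a multiple of 64,
+// returning 256.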
+func growCap(oldCap, unit, num int) (newCap int) {
+ // appendslice logic (if cap < 1024, *2, else *1.25):
+ // leads to many copy calls, especially when copying bytes.
+ // bytes.Buffer model (2*cap + n): much better for bytes.
+ // smarter way is to take the byte-size of the appended element(type) into account
+
+ // maintain 3 thresholds:
+ // t1: if cap <= t1, newcap = 2x
+ // t2: if cap <= t2, newcap = 1.75x
+ // t3: if cap <= t3, newcap = 1.5x
+ // else newcap = 1.25x
+ //
+ // t1, t2, t3 >= 1024 always.
+ // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same)
+ //
+ // With this, appending for bytes increase by:
+ // 100% up to 4K
+ // 75% up to 8K
+ // 50% up to 16K
+ // 25% beyond that
+
+ // unit can be 0 e.g. for struct{}{}; handle that appropriately
+ var t1, t2, t3 int // thresholds
+ if unit <= 1 {
+ t1, t2, t3 = 4*1024, 8*1024, 16*1024
+ } else if unit < 16 {
+ t3 = 16 / unit * 1024
+ t1 = t3 * 1 / 4
+ t2 = t3 * 2 / 4
+ } else {
+ t1, t2, t3 = 1024, 1024, 1024
+ }
+
+ var x int // temporary variable
+
+ // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively
+ if oldCap <= t1 { // [0,t1]
+ x = 8
+ } else if oldCap > t3 { // (t3,infinity]
+ x = 5
+ } else if oldCap <= t2 { // (t1,t2]
+ x = 7
+ } else { // (t2,t3]
+ x = 6
+ }
+ newCap = x * oldCap / 4
+
+ if num > 0 {
+ newCap += num
+ }
+
+ // ensure newCap is a multiple of 64 (if it is > 64) or 16.
+ if newCap > 64 {
+ if x = newCap % 64; x != 0 {
+ x = newCap / 64
+ newCap = 64 * (x + 1)
+ }
+ } else {
+ if x = newCap % 16; x != 0 {
+ x = newCap / 16
+ newCap = 16 * (x + 1)
+ }
+ }
+ return
+}
+
+func expandSliceValue(s reflect.Value, num int) reflect.Value {
+ if num <= 0 {
+ return s
+ }
+ l0 := s.Len()
+ l1 := l0 + num // new slice length
+ if l1 < l0 {
+ panic("ExpandSlice: slice overflow")
+ }
+ c0 := s.Cap()
+ if l1 <= c0 {
+ return s.Slice(0, l1)
+ }
+ st := s.Type()
+ c1 := growCap(c0, int(st.Elem().Size()), num)
+ s2 := reflect.MakeSlice(st, l1, c1)
+ // println("expandslicevalue: cap-old: ", c0, ", cap-new: ", c1, ", len-new: ", l1)
+ reflect.Copy(s2, s)
+ return s2
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
new file mode 100644
index 0000000..7c2ffc0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
@@ -0,0 +1,20 @@
+//+build !unsafe
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// stringView returns a view of the []byte as a string.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+func stringView(v []byte) string {
+ return string(v)
+}
+
+// bytesView returns a view of the string as a []byte.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+func bytesView(v string) []byte {
+ return []byte(v)
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_unsafe.go
new file mode 100644
index 0000000..373b2b1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_unsafe.go
@@ -0,0 +1,45 @@
+//+build unsafe
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "unsafe"
+)
+
+// This file has unsafe variants of some helper methods.
+
+type unsafeString struct {
+ Data uintptr
+ Len int
+}
+
+type unsafeBytes struct {
+ Data uintptr
+ Len int
+ Cap int
+}
+
+// stringView returns a view of the []byte as a string.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+func stringView(v []byte) string {
+ if len(v) == 0 {
+ return ""
+ }
+ x := unsafeString{uintptr(unsafe.Pointer(&v[0])), len(v)}
+ return *(*string)(unsafe.Pointer(&x))
+}
+
+// bytesView returns a view of the string as a []byte.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+func bytesView(v string) []byte {
+ if len(v) == 0 {
+ return zeroByteSlice
+ }
+ x := unsafeBytes{uintptr(unsafe.Pointer(&v)), len(v), len(v)}
+ return *(*[]byte)(unsafe.Pointer(&x))
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/json.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/json.go
new file mode 100644
index 0000000..a04dfcb
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/json.go
@@ -0,0 +1,1213 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// By default, this json support uses base64 encoding for bytes, because you cannot
+// store and read arbitrary byte sequences in a json string (only unicode text).
+// However, the user can configure how to encode/decode bytes.
+//
+// This library specifically supports UTF-8 for encoding and decoding only.
+//
+// Note that the library will happily encode/decode things which are not valid
+// json e.g. a map[int64]string. We do it for consistency. With valid json,
+// we will encode and decode appropriately.
+// Users can specify their map type if necessary to force it.
+//
+// Note:
+// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently.
+// We implement it here.
+// - Also, strconv.ParseXXX for floats and integers
+// - only works on strings resulting in unnecessary allocation and []byte-string conversion.
+// - it does a lot of redundant checks, because json numbers are simpler than what it supports.
+// - We parse numbers (floats and integers) directly here.
+// We only delegate parsing floats if it is a hairy float which could cause a loss of precision.
+// In that case, we delegate to strconv.ParseFloat.
+//
+// Note:
+// - encode does not beautify by default. There is no whitespace when encoding, unless indentation is configured.
+// - rpc calls which take single integer arguments or write single numeric arguments will need care.
+
+// Top-level methods of json(Enc|Dec)Driver (which are implementations of (en|de)cDriver)
+// MUST not call one another.
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "strconv"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+//--------------------------------
+
+var (
+ jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'}
+
+ jsonFloat64Pow10 = [...]float64{
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ 1e20, 1e21, 1e22,
+ }
+
+ jsonUint64Pow10 = [...]uint64{
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ }
+
+ // jsonTabs and jsonSpaces are used as caches for indents
+ jsonTabs, jsonSpaces string
+)
+
+const (
+ // jsonUnreadAfterDecNum controls whether we unread after decoding a number.
+ //
+ // The alternative (instead of unreading) is to just update d.tok (iff it's not a whitespace char).
+ // However, doing that means that we may HOLD onto some data which belongs to another stream.
+ // Thus, it is safest to unread the data when done.
+ // Keep this behind a constant flag for now.
+ jsonUnreadAfterDecNum = true
+
+ // If !jsonValidateSymbols, decoding will be faster, by skipping some checks:
+ // - If we see first character of null, false or true,
+ // do not validate subsequent characters.
+ // - e.g. if we see a n, assume null and skip next 3 characters,
+ // and do not validate they are ull.
+ // P.S. Do not expect a significant decoding boost from this.
+ jsonValidateSymbols = true
+
+ // if jsonTruncateMantissa, truncate the mantissa if it has trailing 0's.
+ // This is important because it could allow some floats to be decoded without
+ // deferring to strconv.ParseFloat.
+ jsonTruncateMantissa = true
+
+ // if mantissa >= jsonNumUintCutoff before multiplying by 10, this is an overflow
+ jsonNumUintCutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base)
+
+ // if mantissa >= jsonNumUintMaxVal, this is an overflow
+ jsonNumUintMaxVal = 1<<uint64(64) - 1
+
+ // jsonNumDigitsUint64Largest = 19
+
+ jsonSpacesOrTabsLen = 128
+)
+
+func init() {
+ var bs [jsonSpacesOrTabsLen]byte
+ for i := 0; i < jsonSpacesOrTabsLen; i++ {
+ bs[i] = ' '
+ }
+ jsonSpaces = string(bs[:])
+
+ for i := 0; i < jsonSpacesOrTabsLen; i++ {
+ bs[i] = '\t'
+ }
+ jsonTabs = string(bs[:])
+}
+
+type jsonEncDriver struct {
+ e *Encoder
+ w encWriter
+ h *JsonHandle
+ b [64]byte // scratch
+ bs []byte // scratch
+ se setExtWrapper
+ ds string // indent string
+ dl uint16 // indent level
+ dt bool // indent using tabs
+ d bool // indent
+ c containerState
+ noBuiltInTypes
+}
+
+// indent is done as below:
+// - newline and indent are added before each mapKey or arrayElem
+// - newline and indent are added before each ending,
+// except when there was no entry (so we can have {} or [])
+
+func (e *jsonEncDriver) sendContainerState(c containerState) {
+ // determine whether to output separators
+ if c == containerMapKey {
+ if e.c != containerMapStart {
+ e.w.writen1(',')
+ }
+ if e.d {
+ e.writeIndent()
+ }
+ } else if c == containerMapValue {
+ if e.d {
+ e.w.writen2(':', ' ')
+ } else {
+ e.w.writen1(':')
+ }
+ } else if c == containerMapEnd {
+ if e.d {
+ e.dl--
+ if e.c != containerMapStart {
+ e.writeIndent()
+ }
+ }
+ e.w.writen1('}')
+ } else if c == containerArrayElem {
+ if e.c != containerArrayStart {
+ e.w.writen1(',')
+ }
+ if e.d {
+ e.writeIndent()
+ }
+ } else if c == containerArrayEnd {
+ if e.d {
+ e.dl--
+ if e.c != containerArrayStart {
+ e.writeIndent()
+ }
+ }
+ e.w.writen1(']')
+ }
+ e.c = c
+}
+
+func (e *jsonEncDriver) writeIndent() {
+ e.w.writen1('\n')
+ if x := len(e.ds) * int(e.dl); x <= jsonSpacesOrTabsLen {
+ if e.dt {
+ e.w.writestr(jsonTabs[:x])
+ } else {
+ e.w.writestr(jsonSpaces[:x])
+ }
+ } else {
+ for i := uint16(0); i < e.dl; i++ {
+ e.w.writestr(e.ds)
+ }
+ }
+}
+
+func (e *jsonEncDriver) EncodeNil() {
+ e.w.writeb(jsonLiterals[9:13]) // null
+}
+
+func (e *jsonEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writeb(jsonLiterals[0:4]) // true
+ } else {
+ e.w.writeb(jsonLiterals[4:9]) // false
+ }
+}
+
+func (e *jsonEncDriver) EncodeFloat32(f float32) {
+ e.w.writeb(strconv.AppendFloat(e.b[:0], float64(f), 'E', -1, 32))
+}
+
+func (e *jsonEncDriver) EncodeFloat64(f float64) {
+ // e.w.writestr(strconv.FormatFloat(f, 'E', -1, 64))
+ e.w.writeb(strconv.AppendFloat(e.b[:0], f, 'E', -1, 64))
+}
+
+func (e *jsonEncDriver) EncodeInt(v int64) {
+ if x := e.h.IntegerAsString; x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) {
+ e.w.writen1('"')
+ e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
+ e.w.writen1('"')
+ return
+ }
+ e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriver) EncodeUint(v uint64) {
+ if x := e.h.IntegerAsString; x == 'A' || x == 'L' && v > 1<<53 {
+ e.w.writen1('"')
+ e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
+ e.w.writen1('"')
+ return
+ }
+ e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
+ if v := ext.ConvertExt(rv); v == nil {
+ e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil()
+ } else {
+ en.encode(v)
+ }
+}
+
+func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
+ // only encodes re.Value (never re.Data)
+ if re.Value == nil {
+ e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil()
+ } else {
+ en.encode(re.Value)
+ }
+}
+
+func (e *jsonEncDriver) EncodeArrayStart(length int) {
+ if e.d {
+ e.dl++
+ }
+ e.w.writen1('[')
+ e.c = containerArrayStart
+}
+
+func (e *jsonEncDriver) EncodeMapStart(length int) {
+ if e.d {
+ e.dl++
+ }
+ e.w.writen1('{')
+ e.c = containerMapStart
+}
+
+func (e *jsonEncDriver) EncodeString(c charEncoding, v string) {
+ // e.w.writestr(strconv.Quote(v))
+ e.quoteStr(v)
+}
+
+func (e *jsonEncDriver) EncodeSymbol(v string) {
+ // e.EncodeString(c_UTF8, v)
+ e.quoteStr(v)
+}
+
+func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ // if encoding raw bytes and RawBytesExt is configured, use it to encode
+ if c == c_RAW && e.se.i != nil {
+ e.EncodeExt(v, 0, &e.se, e.e)
+ return
+ }
+ if c == c_RAW {
+ slen := base64.StdEncoding.EncodedLen(len(v))
+ if cap(e.bs) >= slen {
+ e.bs = e.bs[:slen]
+ } else {
+ e.bs = make([]byte, slen)
+ }
+ base64.StdEncoding.Encode(e.bs, v)
+ e.w.writen1('"')
+ e.w.writeb(e.bs)
+ e.w.writen1('"')
+ } else {
+ // e.EncodeString(c, string(v))
+ e.quoteStr(stringView(v))
+ }
+}
+
+func (e *jsonEncDriver) EncodeAsis(v []byte) {
+ e.w.writeb(v)
+}
+
+func (e *jsonEncDriver) quoteStr(s string) {
+ // adapted from std pkg encoding/json
+ const hex = "0123456789abcdef"
+ w := e.w
+ w.writen1('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ w.writestr(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ w.writen2('\\', b)
+ case '\n':
+ w.writen2('\\', 'n')
+ case '\r':
+ w.writen2('\\', 'r')
+ case '\b':
+ w.writen2('\\', 'b')
+ case '\f':
+ w.writen2('\\', 'f')
+ case '\t':
+ w.writen2('\\', 't')
+ default:
+ // encode all bytes < 0x20 (except \r, \n).
+ // also encode < > & to prevent security holes when served to some browsers.
+ w.writestr(`\u00`)
+ w.writen2(hex[b>>4], hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRuneInString(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ w.writestr(s[start:i])
+ }
+ w.writestr(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
+ // Both technically valid JSON, but bomb on JSONP, so fix here.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ w.writestr(s[start:i])
+ }
+ w.writestr(`\u202`)
+ w.writen1(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ w.writestr(s[start:])
+ }
+ w.writen1('"')
+}
+
+//--------------------------------
+
+type jsonNum struct {
+ // bytes []byte // may have [+-.eE0-9]
+ mantissa uint64 // accumulated mantissa digits (without sign, dot or exponent).
+ exponent int16 // exponent value.
+ manOverflow bool
+ neg bool // started with -. No initial sign in the bytes above.
+ dot bool // has dot
+ explicitExponent bool // explicit exponent
+}
+
+func (x *jsonNum) reset() {
+ x.manOverflow = false
+ x.neg = false
+ x.dot = false
+ x.explicitExponent = false
+ x.mantissa = 0
+ x.exponent = 0
+}
+
+// uintExp is called only if exponent > 0.
+func (x *jsonNum) uintExp() (n uint64, overflow bool) {
+ n = x.mantissa
+ e := x.exponent
+ if e >= int16(len(jsonUint64Pow10)) {
+ overflow = true
+ return
+ }
+ n *= jsonUint64Pow10[e]
+ if n < x.mantissa || n > jsonNumUintMaxVal {
+ overflow = true
+ return
+ }
+ return
+ // for i := int16(0); i < e; i++ {
+ // if n >= jsonNumUintCutoff {
+ // overflow = true
+ // return
+ // }
+ // n *= 10
+ // }
+ // return
+}
+
+// these constants are only used within floatVal.
+// They are brought out, so that floatVal can be inlined.
+const (
+ jsonUint64MantissaBits = 52
+ jsonMaxExponent = int16(len(jsonFloat64Pow10)) - 1
+)
+
+func (x *jsonNum) floatVal() (f float64, parseUsingStrConv bool) {
+ // We do not want to lose precision.
+ // Consequently, we will delegate to strconv.ParseFloat if any of the following happen:
+ // - There are more digits than in math.MaxUint64: 18446744073709551615 (20 digits)
+ // We expect up to 99.... (19 digits)
+ // - The mantissa cannot fit into 52 bits of a uint64
+ // - The exponent is beyond our scope, ie beyond 22.
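+ //
+ // For example, mantissa=1234, exponent=-2, neg=false is computed here directly as 12.34,
+ // while a mantissa overflow (manOverflow) is delegated to strconv.ParseFloat.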
+ parseUsingStrConv = x.manOverflow ||
+ x.exponent > jsonMaxExponent ||
+ (x.exponent < 0 && -(x.exponent) > jsonMaxExponent) ||
+ x.mantissa>>jsonUint64MantissaBits != 0
+
+ if parseUsingStrConv {
+ return
+ }
+
+ // all good. so handle parse here.
+ f = float64(x.mantissa)
+ // fmt.Printf(".Float: uint64 value: %v, float: %v\n", m, f)
+ if x.neg {
+ f = -f
+ }
+ if x.exponent > 0 {
+ f *= jsonFloat64Pow10[x.exponent]
+ } else if x.exponent < 0 {
+ f /= jsonFloat64Pow10[-x.exponent]
+ }
+ return
+}
+
+type jsonDecDriver struct {
+ noBuiltInTypes
+ d *Decoder
+ h *JsonHandle
+ r decReader
+
+ c containerState
+ // tok is used to store the token read right after skipWhiteSpace.
+ tok uint8
+
+ bstr [8]byte // scratch used for string \UXXX parsing
+ b [64]byte // scratch, used for parsing strings or numbers
+ b2 [64]byte // scratch, used only for decodeBytes (after base64)
+ bs []byte // scratch. Initialized from b. Used for parsing strings or numbers.
+
+ se setExtWrapper
+
+ n jsonNum
+}
+
+func jsonIsWS(b byte) bool {
+ return b == ' ' || b == '\t' || b == '\r' || b == '\n'
+}
+
+// // This will skip whitespace characters and return the next byte to read.
+// // The next byte determines what the value will be one of.
+// func (d *jsonDecDriver) skipWhitespace() {
+// // fast-path: do not enter loop. Just check first (in case no whitespace).
+// b := d.r.readn1()
+// if jsonIsWS(b) {
+// r := d.r
+// for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+// }
+// }
+// d.tok = b
+// }
+
+func (d *jsonDecDriver) uncacheRead() {
+ if d.tok != 0 {
+ d.r.unreadn1()
+ d.tok = 0
+ }
+}
+
+func (d *jsonDecDriver) sendContainerState(c containerState) {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ var xc uint8 // char expected
+ if c == containerMapKey {
+ if d.c != containerMapStart {
+ xc = ','
+ }
+ } else if c == containerMapValue {
+ xc = ':'
+ } else if c == containerMapEnd {
+ xc = '}'
+ } else if c == containerArrayElem {
+ if d.c != containerArrayStart {
+ xc = ','
+ }
+ } else if c == containerArrayEnd {
+ xc = ']'
+ }
+ if xc != 0 {
+ if d.tok != xc {
+ d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok)
+ }
+ d.tok = 0
+ }
+ d.c = c
+}
+
+func (d *jsonDecDriver) CheckBreak() bool {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok == '}' || d.tok == ']' {
+ // d.tok = 0 // only checking, not consuming
+ return true
+ }
+ return false
+}
+
+func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) {
+ bs := d.r.readx(int(toIdx - fromIdx))
+ d.tok = 0
+ if jsonValidateSymbols {
+ if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) {
+ d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs)
+ return
+ }
+ }
+}
+
+func (d *jsonDecDriver) TryDecodeAsNil() bool {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok == 'n' {
+ d.readStrIdx(10, 13) // ull
+ return true
+ }
+ return false
+}
+
+func (d *jsonDecDriver) DecodeBool() bool {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok == 'f' {
+ d.readStrIdx(5, 9) // alse
+ return false
+ }
+ if d.tok == 't' {
+ d.readStrIdx(1, 4) // rue
+ return true
+ }
+ d.d.errorf("json: decode bool: got first char %c", d.tok)
+ return false // "unreachable"
+}
+
+func (d *jsonDecDriver) ReadMapStart() int {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok != '{' {
+ d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok)
+ }
+ d.tok = 0
+ d.c = containerMapStart
+ return -1
+}
+
+func (d *jsonDecDriver) ReadArrayStart() int {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok != '[' {
+ d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok)
+ }
+ d.tok = 0
+ d.c = containerArrayStart
+ return -1
+}
+
+func (d *jsonDecDriver) ContainerType() (vt valueType) {
+ // check container type by checking the first char
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if b := d.tok; b == '{' {
+ return valueTypeMap
+ } else if b == '[' {
+ return valueTypeArray
+ } else if b == 'n' {
+ return valueTypeNil
+ } else if b == '"' {
+ return valueTypeString
+ }
+ return valueTypeUnset
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ // return false // "unreachable"
+}
+
+func (d *jsonDecDriver) decNum(storeBytes bool) {
+	// If it has a . or an e|E, decode as a float; else decode as an int.
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ b := d.tok
+ var str bool
+ if b == '"' {
+ str = true
+ b = d.r.readn1()
+ }
+ if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) {
+ d.d.errorf("json: decNum: got first char '%c'", b)
+ return
+ }
+ d.tok = 0
+
+ const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base)
+ const jsonNumUintMaxVal = 1<<uint64(64) - 1
+
+ n := &d.n
+ r := d.r
+ n.reset()
+ d.bs = d.bs[:0]
+
+ if str && storeBytes {
+ d.bs = append(d.bs, '"')
+ }
+
+ // The format of a number is as below:
+ // parsing: sign? digit* dot? digit* e? sign? digit*
+ // states: 0 1* 2 3* 4 5* 6 7
+ // We honor this state so we can break correctly.
+ var state uint8 = 0
+ var eNeg bool
+ var e int16
+ var eof bool
+LOOP:
+ for !eof {
+ // fmt.Printf("LOOP: b: %q\n", b)
+ switch b {
+ case '+':
+ switch state {
+ case 0:
+ state = 2
+ // do not add sign to the slice ...
+ b, eof = r.readn1eof()
+ continue
+ case 6: // typ = jsonNumFloat
+ state = 7
+ default:
+ break LOOP
+ }
+ case '-':
+ switch state {
+ case 0:
+ state = 2
+ n.neg = true
+ // do not add sign to the slice ...
+ b, eof = r.readn1eof()
+ continue
+ case 6: // typ = jsonNumFloat
+ eNeg = true
+ state = 7
+ default:
+ break LOOP
+ }
+ case '.':
+ switch state {
+ case 0, 2: // typ = jsonNumFloat
+ state = 4
+ n.dot = true
+ default:
+ break LOOP
+ }
+ case 'e', 'E':
+ switch state {
+ case 0, 2, 4: // typ = jsonNumFloat
+ state = 6
+ // n.mantissaEndIndex = int16(len(n.bytes))
+ n.explicitExponent = true
+ default:
+ break LOOP
+ }
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ switch state {
+ case 0:
+ state = 2
+ fallthrough
+ case 2:
+ fallthrough
+ case 4:
+ if n.dot {
+ n.exponent--
+ }
+ if n.mantissa >= jsonNumUintCutoff {
+ n.manOverflow = true
+ break
+ }
+ v := uint64(b - '0')
+ n.mantissa *= 10
+ if v != 0 {
+ n1 := n.mantissa + v
+ if n1 < n.mantissa || n1 > jsonNumUintMaxVal {
+ n.manOverflow = true // n+v overflows
+ break
+ }
+ n.mantissa = n1
+ }
+ case 6:
+ state = 7
+ fallthrough
+ case 7:
+ if !(b == '0' && e == 0) {
+ e = e*10 + int16(b-'0')
+ }
+ default:
+ break LOOP
+ }
+ case '"':
+ if str {
+ if storeBytes {
+ d.bs = append(d.bs, '"')
+ }
+ b, eof = r.readn1eof()
+ }
+ break LOOP
+ default:
+ break LOOP
+ }
+ if storeBytes {
+ d.bs = append(d.bs, b)
+ }
+ b, eof = r.readn1eof()
+ }
+
+ if jsonTruncateMantissa && n.mantissa != 0 {
+ for n.mantissa%10 == 0 {
+ n.mantissa /= 10
+ n.exponent++
+ }
+ }
+
+ if e != 0 {
+ if eNeg {
+ n.exponent -= e
+ } else {
+ n.exponent += e
+ }
+ }
+
+ // d.n = n
+
+ if !eof {
+ if jsonUnreadAfterDecNum {
+ r.unreadn1()
+ } else {
+ if !jsonIsWS(b) {
+ d.tok = b
+ }
+ }
+ }
+ // fmt.Printf("1: n: bytes: %s, neg: %v, dot: %v, exponent: %v, mantissaEndIndex: %v\n",
+ // n.bytes, n.neg, n.dot, n.exponent, n.mantissaEndIndex)
+ return
+}
+
+func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ d.decNum(false)
+ n := &d.n
+ if n.manOverflow {
+ d.d.errorf("json: overflow integer after: %v", n.mantissa)
+ return
+ }
+ var u uint64
+ if n.exponent == 0 {
+ u = n.mantissa
+ } else if n.exponent < 0 {
+ d.d.errorf("json: fractional integer")
+ return
+ } else if n.exponent > 0 {
+ var overflow bool
+ if u, overflow = n.uintExp(); overflow {
+ d.d.errorf("json: overflow integer")
+ return
+ }
+ }
+ i = int64(u)
+ if n.neg {
+ i = -i
+ }
+ if chkOvf.Int(i, bitsize) {
+ d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs)
+ return
+ }
+ // fmt.Printf("DecodeInt: %v\n", i)
+ return
+}
+
+// floatVal MUST only be called after a decNum, as d.bs now contains the bytes of the number
+func (d *jsonDecDriver) floatVal() (f float64) {
+ f, useStrConv := d.n.floatVal()
+ if useStrConv {
+ var err error
+ if f, err = strconv.ParseFloat(stringView(d.bs), 64); err != nil {
+ panic(fmt.Errorf("parse float: %s, %v", d.bs, err))
+ }
+ if d.n.neg {
+ f = -f
+ }
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) {
+ d.decNum(false)
+ n := &d.n
+ if n.neg {
+ d.d.errorf("json: unsigned integer cannot be negative")
+ return
+ }
+ if n.manOverflow {
+ d.d.errorf("json: overflow integer after: %v", n.mantissa)
+ return
+ }
+ if n.exponent == 0 {
+ u = n.mantissa
+ } else if n.exponent < 0 {
+ d.d.errorf("json: fractional integer")
+ return
+ } else if n.exponent > 0 {
+ var overflow bool
+ if u, overflow = n.uintExp(); overflow {
+ d.d.errorf("json: overflow integer")
+ return
+ }
+ }
+ if chkOvf.Uint(u, bitsize) {
+ d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs)
+ return
+ }
+ // fmt.Printf("DecodeUint: %v\n", u)
+ return
+}
+
+func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ d.decNum(true)
+ f = d.floatVal()
+ if chkOverflow32 && chkOvf.Float32(f) {
+ d.d.errorf("json: overflow float32: %v, %s", f, d.bs)
+ return
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = xtag
+ d.d.decode(&re.Value)
+ } else {
+ var v interface{}
+ d.d.decode(&v)
+ ext.UpdateExt(rv, v)
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode.
+ if !isstring && d.se.i != nil {
+ bsOut = bs
+ d.DecodeExt(&bsOut, 0, &d.se)
+ return
+ }
+ d.appendStringAsBytes()
+ // if isstring, then just return the bytes, even if it is using the scratch buffer.
+ // the bytes will be converted to a string as needed.
+ if isstring {
+ return d.bs
+ }
+ bs0 := d.bs
+ slen := base64.StdEncoding.DecodedLen(len(bs0))
+ if slen <= cap(bs) {
+ bsOut = bs[:slen]
+ } else if zerocopy && slen <= cap(d.b2) {
+ bsOut = d.b2[:slen]
+ } else {
+ bsOut = make([]byte, slen)
+ }
+ slen2, err := base64.StdEncoding.Decode(bsOut, bs0)
+ if err != nil {
+ d.d.errorf("json: error decoding base64 binary '%s': %v", bs0, err)
+ return nil
+ }
+ if slen != slen2 {
+ bsOut = bsOut[:slen2]
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeString() (s string) {
+ d.appendStringAsBytes()
+ // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key
+ if d.c == containerMapKey {
+ return d.d.string(d.bs)
+ }
+ return string(d.bs)
+}
+
+func (d *jsonDecDriver) appendStringAsBytes() {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok != '"' {
+ d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok)
+ }
+ d.tok = 0
+
+ v := d.bs[:0]
+ var c uint8
+ r := d.r
+ for {
+ c = r.readn1()
+ if c == '"' {
+ break
+ } else if c == '\\' {
+ c = r.readn1()
+ switch c {
+ case '"', '\\', '/', '\'':
+ v = append(v, c)
+ case 'b':
+ v = append(v, '\b')
+ case 'f':
+ v = append(v, '\f')
+ case 'n':
+ v = append(v, '\n')
+ case 'r':
+ v = append(v, '\r')
+ case 't':
+ v = append(v, '\t')
+ case 'u':
+ rr := d.jsonU4(false)
+ // fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr))
+ if utf16.IsSurrogate(rr) {
+ rr = utf16.DecodeRune(rr, d.jsonU4(true))
+ }
+ w2 := utf8.EncodeRune(d.bstr[:], rr)
+ v = append(v, d.bstr[:w2]...)
+ default:
+ d.d.errorf("json: unsupported escaped value: %c", c)
+ }
+ } else {
+ v = append(v, c)
+ }
+ }
+ d.bs = v
+}
+
+func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune {
+ r := d.r
+ if checkSlashU && !(r.readn1() == '\\' && r.readn1() == 'u') {
+ d.d.errorf(`json: unquoteStr: invalid unicode sequence. Expecting \u`)
+ return 0
+ }
+ // u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64)
+ var u uint32
+ for i := 0; i < 4; i++ {
+ v := r.readn1()
+ if '0' <= v && v <= '9' {
+ v = v - '0'
+		} else if 'a' <= v && v <= 'f' {
+			v = v - 'a' + 10
+		} else if 'A' <= v && v <= 'F' {
+			v = v - 'A' + 10
+ } else {
+ d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, v)
+ return 0
+ }
+ u = u*16 + uint32(v)
+ }
+ return rune(u)
+}
+
+func (d *jsonDecDriver) DecodeNaked() {
+ z := &d.d.n
+ // var decodeFurther bool
+
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ switch d.tok {
+ case 'n':
+ d.readStrIdx(10, 13) // ull
+ z.v = valueTypeNil
+ case 'f':
+ d.readStrIdx(5, 9) // alse
+ z.v = valueTypeBool
+ z.b = false
+ case 't':
+ d.readStrIdx(1, 4) // rue
+ z.v = valueTypeBool
+ z.b = true
+ case '{':
+ z.v = valueTypeMap
+ // d.tok = 0 // don't consume. kInterfaceNaked will call ReadMapStart
+ // decodeFurther = true
+ case '[':
+ z.v = valueTypeArray
+ // d.tok = 0 // don't consume. kInterfaceNaked will call ReadArrayStart
+ // decodeFurther = true
+ case '"':
+ z.v = valueTypeString
+ z.s = d.DecodeString()
+ default: // number
+ d.decNum(true)
+ n := &d.n
+		// if the string had any of [.eE], then decode as float.
+ switch {
+ case n.explicitExponent, n.dot, n.exponent < 0, n.manOverflow:
+ z.v = valueTypeFloat
+ z.f = d.floatVal()
+ case n.exponent == 0:
+ u := n.mantissa
+ switch {
+ case n.neg:
+ z.v = valueTypeInt
+ z.i = -int64(u)
+ case d.h.SignedInteger:
+ z.v = valueTypeInt
+ z.i = int64(u)
+ default:
+ z.v = valueTypeUint
+ z.u = u
+ }
+ default:
+ u, overflow := n.uintExp()
+ switch {
+ case overflow:
+ z.v = valueTypeFloat
+ z.f = d.floatVal()
+ case n.neg:
+ z.v = valueTypeInt
+ z.i = -int64(u)
+ case d.h.SignedInteger:
+ z.v = valueTypeInt
+ z.i = int64(u)
+ default:
+ z.v = valueTypeUint
+ z.u = u
+ }
+ }
+ // fmt.Printf("DecodeNaked: Number: %T, %v\n", v, v)
+ }
+ // if decodeFurther {
+ // d.s.sc.retryRead()
+ // }
+ return
+}
+
+//----------------------
+
+// JsonHandle is a handle for JSON encoding format.
+//
+// Json is comprehensively supported:
+// - decodes numbers into interface{} as int, uint or float64
+// - configurable way to encode/decode []byte .
+// by default, encodes and decodes []byte using base64 Std Encoding
+// - UTF-8 support for encoding and decoding
+//
+// It has better performance than the json library in the standard library,
+// by leveraging the performance improvements of the codec library and
+// minimizing allocations.
+//
+// In addition, it doesn't read more bytes than necessary during a decode, which allows
+// reading multiple values from a stream containing json and non-json content.
+// For example, a user can read a json value, then a cbor value, then a msgpack value,
+// all from the same stream in sequence.
+type JsonHandle struct {
+ textEncodingType
+ BasicHandle
+ // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way.
+ // If not configured, raw bytes are encoded to/from base64 text.
+ RawBytesExt InterfaceExt
+
+ // Indent indicates how a value is encoded.
+ // - If positive, indent by that number of spaces.
+ // - If negative, indent by that number of tabs.
+ Indent int8
+
+ // IntegerAsString controls how integers (signed and unsigned) are encoded.
+ //
+ // Per the JSON Spec, JSON numbers are 64-bit floating point numbers.
+ // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision.
+ // This can be mitigated by configuring how to encode integers.
+ //
+	// IntegerAsString interprets the following values:
+ // - if 'L', then encode integers > 2^53 as a json string.
+ // - if 'A', then encode all integers as a json string
+ // containing the exact integer representation as a decimal.
+ // - else encode all integers as a json number (default)
+ IntegerAsString uint8
+}
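+
+// A minimal usage sketch (illustrative, not part of this file; assumes the
+// package's exported NewEncoderBytes/NewDecoderBytes constructors):
+//
+//	var h JsonHandle
+//	h.Indent = 2            // pretty-print with 2 spaces per level
+//	h.IntegerAsString = 'L' // integers > 2^53 are encoded as json strings
+//
+//	var out []byte
+//	if err := NewEncoderBytes(&out, &h).Encode(map[string]uint64{"n": 1 << 60}); err != nil {
+//		// handle error
+//	}
+//	var v interface{}
+//	if err := NewDecoderBytes(out, &h).Decode(&v); err != nil {
+//		// handle error
+//	}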
+
+func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{i: ext})
+}
+
+func (h *JsonHandle) newEncDriver(e *Encoder) encDriver {
+ hd := jsonEncDriver{e: e, h: h}
+ hd.bs = hd.b[:0]
+
+ hd.reset()
+
+ return &hd
+}
+
+func (h *JsonHandle) newDecDriver(d *Decoder) decDriver {
+ // d := jsonDecDriver{r: r.(*bytesDecReader), h: h}
+ hd := jsonDecDriver{d: d, h: h}
+ hd.bs = hd.b[:0]
+ hd.reset()
+ return &hd
+}
+
+func (e *jsonEncDriver) reset() {
+ e.w = e.e.w
+ e.se.i = e.h.RawBytesExt
+ if e.bs != nil {
+ e.bs = e.bs[:0]
+ }
+ e.d, e.dt, e.dl, e.ds = false, false, 0, ""
+ e.c = 0
+ if e.h.Indent > 0 {
+ e.d = true
+ e.ds = jsonSpaces[:e.h.Indent]
+ } else if e.h.Indent < 0 {
+ e.d = true
+ e.dt = true
+ e.ds = jsonTabs[:-(e.h.Indent)]
+ }
+}
+
+func (d *jsonDecDriver) reset() {
+ d.r = d.d.r
+ d.se.i = d.h.RawBytesExt
+ if d.bs != nil {
+ d.bs = d.bs[:0]
+ }
+ d.c, d.tok = 0, 0
+ d.n.reset()
+}
+
+var jsonEncodeTerminate = []byte{' '}
+
+func (h *JsonHandle) rpcEncodeTerminate() []byte {
+ return jsonEncodeTerminate
+}
+
+var _ decDriver = (*jsonDecDriver)(nil)
+var _ encDriver = (*jsonEncDriver)(nil)
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/msgpack.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/msgpack.go
new file mode 100644
index 0000000..f9f8723
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/msgpack.go
@@ -0,0 +1,845 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+/*
+MSGPACK
+
+The msgpack-c implementation powers the C, C++, Python, Ruby, etc. libraries.
+We need to maintain compatibility with it and with how it encodes integer values
+without caring about the type.
+
+For compatibility with behaviour of msgpack-c reference implementation:
+ - Go intX (>0) and uintX
+ IS ENCODED AS
+ msgpack +ve fixnum, unsigned
+ - Go intX (<0)
+ IS ENCODED AS
+ msgpack -ve fixnum, signed
+
+*/
+package codec
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "net/rpc"
+ "reflect"
+)
+
+const (
+ mpPosFixNumMin byte = 0x00
+ mpPosFixNumMax = 0x7f
+ mpFixMapMin = 0x80
+ mpFixMapMax = 0x8f
+ mpFixArrayMin = 0x90
+ mpFixArrayMax = 0x9f
+ mpFixStrMin = 0xa0
+ mpFixStrMax = 0xbf
+ mpNil = 0xc0
+ _ = 0xc1
+ mpFalse = 0xc2
+ mpTrue = 0xc3
+ mpFloat = 0xca
+ mpDouble = 0xcb
+ mpUint8 = 0xcc
+ mpUint16 = 0xcd
+ mpUint32 = 0xce
+ mpUint64 = 0xcf
+ mpInt8 = 0xd0
+ mpInt16 = 0xd1
+ mpInt32 = 0xd2
+ mpInt64 = 0xd3
+
+ // extensions below
+ mpBin8 = 0xc4
+ mpBin16 = 0xc5
+ mpBin32 = 0xc6
+ mpExt8 = 0xc7
+ mpExt16 = 0xc8
+ mpExt32 = 0xc9
+ mpFixExt1 = 0xd4
+ mpFixExt2 = 0xd5
+ mpFixExt4 = 0xd6
+ mpFixExt8 = 0xd7
+ mpFixExt16 = 0xd8
+
+ mpStr8 = 0xd9 // new
+ mpStr16 = 0xda
+ mpStr32 = 0xdb
+
+ mpArray16 = 0xdc
+ mpArray32 = 0xdd
+
+ mpMap16 = 0xde
+ mpMap32 = 0xdf
+
+ mpNegFixNumMin = 0xe0
+ mpNegFixNumMax = 0xff
+)
+
+// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
+// that the backend RPC service takes multiple arguments, which have been arranged
+// in sequence in the slice.
+//
+// The Codec then passes it AS-IS to the rpc service (without wrapping it in an
+// array of 1 element).
+type MsgpackSpecRpcMultiArgs []interface{}
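+
+// A minimal usage sketch (illustrative, not part of this file; assumes a
+// connection conn, a configured MsgpackHandle mh, and a hypothetical remote
+// method "Arith.Add" that takes two positional arguments):
+//
+//	client := rpc.NewClientWithCodec(MsgpackSpecRpc.ClientCodec(conn, &mh))
+//	var sum int
+//	args := MsgpackSpecRpcMultiArgs{2, 3} // passed as-is: two parameters, not one
+//	err := client.Call("Arith.Add", args, &sum)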
+
+// A MsgpackContainer type specifies the different types of msgpackContainers.
+type msgpackContainerType struct {
+ fixCutoff int
+ bFixMin, b8, b16, b32 byte
+ hasFixMin, has8, has8Always bool
+}
+
+var (
+ msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false}
+ msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true}
+ msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false}
+ msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false}
+)
+
+//---------------------------------------------
+
+type msgpackEncDriver struct {
+ noBuiltInTypes
+ encNoSeparator
+ e *Encoder
+ w encWriter
+ h *MsgpackHandle
+ x [8]byte
+}
+
+func (e *msgpackEncDriver) EncodeNil() {
+ e.w.writen1(mpNil)
+}
+
+func (e *msgpackEncDriver) EncodeInt(i int64) {
+ if i >= 0 {
+ e.EncodeUint(uint64(i))
+ } else if i >= -32 {
+ e.w.writen1(byte(i))
+ } else if i >= math.MinInt8 {
+ e.w.writen2(mpInt8, byte(i))
+ } else if i >= math.MinInt16 {
+ e.w.writen1(mpInt16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
+ } else if i >= math.MinInt32 {
+ e.w.writen1(mpInt32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
+ } else {
+ e.w.writen1(mpInt64)
+ bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
+ }
+}
+
+func (e *msgpackEncDriver) EncodeUint(i uint64) {
+ if i <= math.MaxInt8 {
+ e.w.writen1(byte(i))
+ } else if i <= math.MaxUint8 {
+ e.w.writen2(mpUint8, byte(i))
+ } else if i <= math.MaxUint16 {
+ e.w.writen1(mpUint16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
+ } else if i <= math.MaxUint32 {
+ e.w.writen1(mpUint32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
+ } else {
+ e.w.writen1(mpUint64)
+ bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
+ }
+}
+
+func (e *msgpackEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(mpTrue)
+ } else {
+ e.w.writen1(mpFalse)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeFloat32(f float32) {
+ e.w.writen1(mpFloat)
+ bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *msgpackEncDriver) EncodeFloat64(f float64) {
+ e.w.writen1(mpDouble)
+ bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
+func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) {
+ bs := ext.WriteExt(v)
+ if bs == nil {
+ e.EncodeNil()
+ return
+ }
+ if e.h.WriteExt {
+ e.encodeExtPreamble(uint8(xtag), len(bs))
+ e.w.writeb(bs)
+ } else {
+ e.EncodeStringBytes(c_RAW, bs)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+ e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+ e.w.writeb(re.Data)
+}
+
+func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) {
+ if l == 1 {
+ e.w.writen2(mpFixExt1, xtag)
+ } else if l == 2 {
+ e.w.writen2(mpFixExt2, xtag)
+ } else if l == 4 {
+ e.w.writen2(mpFixExt4, xtag)
+ } else if l == 8 {
+ e.w.writen2(mpFixExt8, xtag)
+ } else if l == 16 {
+ e.w.writen2(mpFixExt16, xtag)
+ } else if l < 256 {
+ e.w.writen2(mpExt8, byte(l))
+ e.w.writen1(xtag)
+ } else if l < 65536 {
+ e.w.writen1(mpExt16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
+ e.w.writen1(xtag)
+ } else {
+ e.w.writen1(mpExt32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
+ e.w.writen1(xtag)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeArrayStart(length int) {
+ e.writeContainerLen(msgpackContainerList, length)
+}
+
+func (e *msgpackEncDriver) EncodeMapStart(length int) {
+ e.writeContainerLen(msgpackContainerMap, length)
+}
+
+func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) {
+ if c == c_RAW && e.h.WriteExt {
+ e.writeContainerLen(msgpackContainerBin, len(s))
+ } else {
+ e.writeContainerLen(msgpackContainerStr, len(s))
+ }
+ if len(s) > 0 {
+ e.w.writestr(s)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeSymbol(v string) {
+ e.EncodeString(c_UTF8, v)
+}
+
+func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) {
+ if c == c_RAW && e.h.WriteExt {
+ e.writeContainerLen(msgpackContainerBin, len(bs))
+ } else {
+ e.writeContainerLen(msgpackContainerStr, len(bs))
+ }
+ if len(bs) > 0 {
+ e.w.writeb(bs)
+ }
+}
+
+func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) {
+ if ct.hasFixMin && l < ct.fixCutoff {
+ e.w.writen1(ct.bFixMin | byte(l))
+ } else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) {
+ e.w.writen2(ct.b8, uint8(l))
+ } else if l < 65536 {
+ e.w.writen1(ct.b16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
+ } else {
+ e.w.writen1(ct.b32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
+ }
+}
+
+//---------------------------------------------
+
+type msgpackDecDriver struct {
+ d *Decoder
+ r decReader // *Decoder decReader decReaderT
+ h *MsgpackHandle
+ b [scratchByteArrayLen]byte
+ bd byte
+ bdRead bool
+ br bool // bytes reader
+ noBuiltInTypes
+ noStreamingCodec
+ decNoSeparator
+}
+
+// Note: This returns either a primitive (int, bool, etc) for non-containers,
+// or a containerType, or a specific type denoting nil or extension.
+// It is called when a nil interface{} is passed, leaving it up to the DecDriver
+// to introspect the stream and decide how best to decode.
+// It deciphers the value by looking at the stream first.
+func (d *msgpackDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ bd := d.bd
+ n := &d.d.n
+ var decodeFurther bool
+
+ switch bd {
+ case mpNil:
+ n.v = valueTypeNil
+ d.bdRead = false
+ case mpFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case mpTrue:
+ n.v = valueTypeBool
+ n.b = true
+
+ case mpFloat:
+ n.v = valueTypeFloat
+ n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ case mpDouble:
+ n.v = valueTypeFloat
+ n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+
+ case mpUint8:
+ n.v = valueTypeUint
+ n.u = uint64(d.r.readn1())
+ case mpUint16:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint16(d.r.readx(2)))
+ case mpUint32:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint32(d.r.readx(4)))
+ case mpUint64:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint64(d.r.readx(8)))
+
+ case mpInt8:
+ n.v = valueTypeInt
+ n.i = int64(int8(d.r.readn1()))
+ case mpInt16:
+ n.v = valueTypeInt
+ n.i = int64(int16(bigen.Uint16(d.r.readx(2))))
+ case mpInt32:
+ n.v = valueTypeInt
+ n.i = int64(int32(bigen.Uint32(d.r.readx(4))))
+ case mpInt64:
+ n.v = valueTypeInt
+ n.i = int64(int64(bigen.Uint64(d.r.readx(8))))
+
+ default:
+ switch {
+ case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
+ // positive fixnum (always signed)
+ n.v = valueTypeInt
+ n.i = int64(int8(bd))
+ case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
+ // negative fixnum
+ n.v = valueTypeInt
+ n.i = int64(int8(bd))
+ case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
+ if d.h.RawToString {
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ } else {
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ }
+ case bd == mpBin8, bd == mpBin16, bd == mpBin32:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
+ n.v = valueTypeMap
+ decodeFurther = true
+ case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
+ n.v = valueTypeExt
+ clen := d.readExtLen()
+ n.u = uint64(d.r.readn1())
+ n.l = d.r.readx(clen)
+ default:
+ d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
+ }
+ }
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ if n.v == valueTypeUint && d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = int64(n.u)
+ }
+ return
+}
+
+// int can be decoded from msgpack type: intXXX or uintXXX
+func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case mpUint8:
+ i = int64(uint64(d.r.readn1()))
+ case mpUint16:
+ i = int64(uint64(bigen.Uint16(d.r.readx(2))))
+ case mpUint32:
+ i = int64(uint64(bigen.Uint32(d.r.readx(4))))
+ case mpUint64:
+ i = int64(bigen.Uint64(d.r.readx(8)))
+ case mpInt8:
+ i = int64(int8(d.r.readn1()))
+ case mpInt16:
+ i = int64(int16(bigen.Uint16(d.r.readx(2))))
+ case mpInt32:
+ i = int64(int32(bigen.Uint32(d.r.readx(4))))
+ case mpInt64:
+ i = int64(bigen.Uint64(d.r.readx(8)))
+ default:
+ switch {
+ case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
+ i = int64(int8(d.bd))
+ case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
+ i = int64(int8(d.bd))
+ default:
+ d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ }
+ // check overflow (logic adapted from std pkg reflect/value.go OverflowUint()
+ if bitsize > 0 {
+ if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc {
+ d.d.errorf("Overflow int value: %v", i)
+ return
+ }
+ }
+ d.bdRead = false
+ return
+}
+
+// uint can be decoded from msgpack type: intXXX or uintXXX
+func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case mpUint8:
+ ui = uint64(d.r.readn1())
+ case mpUint16:
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ case mpUint32:
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ case mpUint64:
+ ui = bigen.Uint64(d.r.readx(8))
+ case mpInt8:
+ if i := int64(int8(d.r.readn1())); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ case mpInt16:
+ if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ case mpInt32:
+ if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ case mpInt64:
+ if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ default:
+ switch {
+ case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
+ ui = uint64(d.bd)
+ case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd))
+ return
+ default:
+ d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ }
+ // check overflow (logic adapted from std pkg reflect/value.go OverflowUint()
+ if bitsize > 0 {
+ if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc {
+ d.d.errorf("Overflow uint value: %v", ui)
+ return
+ }
+ }
+ d.bdRead = false
+ return
+}
+
+// float can either be decoded from msgpack type: float, double or intX
+func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == mpFloat {
+ f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ } else if d.bd == mpDouble {
+ f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+ } else {
+ f = float64(d.DecodeInt(0))
+ }
+ if chkOverflow32 && chkOvf.Float32(f) {
+ d.d.errorf("msgpack: float32 overflow: %v", f)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool, fixnum 0 or 1.
+func (d *msgpackDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == mpFalse || d.bd == 0 {
+ // b = false
+ } else if d.bd == mpTrue || d.bd == 1 {
+ b = true
+ } else {
+ d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ var clen int
+ // ignore isstring. Expect that the bytes may be found from msgpackContainerStr or msgpackContainerBin
+ if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
+ clen = d.readContainerLen(msgpackContainerBin)
+ } else {
+ clen = d.readContainerLen(msgpackContainerStr)
+ }
+ // println("DecodeBytes: clen: ", clen)
+ d.bdRead = false
+ // bytes may be nil, so handle it. if nil, clen=-1.
+ if clen < 0 {
+ return nil
+ }
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, bs)
+}
+
+func (d *msgpackDecDriver) DecodeString() (s string) {
+ return string(d.DecodeBytes(d.b[:], true, true))
+}
+
+func (d *msgpackDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.bdRead = true
+}
+
+func (d *msgpackDecDriver) ContainerType() (vt valueType) {
+ bd := d.bd
+ if bd == mpNil {
+ return valueTypeNil
+ } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 ||
+ (!d.h.RawToString &&
+ (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) {
+ return valueTypeBytes
+ } else if d.h.RawToString &&
+ (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) {
+ return valueTypeString
+ } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) {
+ return valueTypeArray
+ } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ }
+ return valueTypeUnset
+}
+
+func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == mpNil {
+ d.bdRead = false
+ v = true
+ }
+ return
+}
+
+func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) {
+ bd := d.bd
+ if bd == mpNil {
+ clen = -1 // to represent nil
+ } else if bd == ct.b8 {
+ clen = int(d.r.readn1())
+ } else if bd == ct.b16 {
+ clen = int(bigen.Uint16(d.r.readx(2)))
+ } else if bd == ct.b32 {
+ clen = int(bigen.Uint32(d.r.readx(4)))
+ } else if (ct.bFixMin & bd) == ct.bFixMin {
+ clen = int(ct.bFixMin ^ bd)
+ } else {
+ d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *msgpackDecDriver) ReadMapStart() int {
+ return d.readContainerLen(msgpackContainerMap)
+}
+
+func (d *msgpackDecDriver) ReadArrayStart() int {
+ return d.readContainerLen(msgpackContainerList)
+}
+
+func (d *msgpackDecDriver) readExtLen() (clen int) {
+ switch d.bd {
+ case mpNil:
+ clen = -1 // to represent nil
+ case mpFixExt1:
+ clen = 1
+ case mpFixExt2:
+ clen = 2
+ case mpFixExt4:
+ clen = 4
+ case mpFixExt8:
+ clen = 8
+ case mpFixExt16:
+ clen = 16
+ case mpExt8:
+ clen = int(d.r.readn1())
+ case mpExt16:
+ clen = int(bigen.Uint16(d.r.readx(2)))
+ case mpExt32:
+ clen = int(bigen.Uint32(d.r.readx(4)))
+ default:
+ d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd)
+ return
+ }
+ return
+}
+
+func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if xtag > 0xff {
+ d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
+ return
+ }
+ realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+ realxtag = uint64(realxtag1)
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+ } else {
+ ext.ReadExt(rv, xbs)
+ }
+ return
+}
+
+func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ xbd := d.bd
+ if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 {
+ xbs = d.DecodeBytes(nil, false, true)
+ } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 ||
+ (xbd >= mpFixStrMin && xbd <= mpFixStrMax) {
+ xbs = d.DecodeBytes(nil, true, true)
+ } else {
+ clen := d.readExtLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+ return
+ }
+ xbs = d.r.readx(clen)
+ }
+ d.bdRead = false
+ return
+}
+
+//--------------------------------------------------
+
+// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
+type MsgpackHandle struct {
+ BasicHandle
+
+ // RawToString controls how raw bytes are decoded into a nil interface{}.
+ RawToString bool
+
+ // WriteExt flag supports encoding configured extensions with extension tags.
+ // It also controls whether other elements of the new spec are encoded (ie Str8).
+ //
+ // With WriteExt=false, configured extensions are serialized as raw bytes
+ // and Str8 is not encoded.
+ //
+ // A stream can still be decoded into a typed value, provided an appropriate value
+ // is provided, but the type cannot be inferred from the stream. If no appropriate
+ // type is provided (e.g. decoding into a nil interface{}), you get back
+ // a []byte or string based on the setting of RawToString.
+ WriteExt bool
+ binaryEncodingType
+}
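+
+// A minimal usage sketch (illustrative, not part of this file; assumes the
+// package's exported NewEncoderBytes/NewDecoderBytes constructors):
+//
+//	mh := MsgpackHandle{RawToString: true} // msgpack raw/str decodes into interface{} as string
+//	mh.WriteExt = true                     // emit bin/str8/ext types from the newer spec
+//
+//	var out []byte
+//	if err := NewEncoderBytes(&out, &mh).Encode(map[string]interface{}{"k": "v"}); err != nil {
+//		// handle error
+//	}
+//	var v interface{}
+//	if err := NewDecoderBytes(out, &mh).Decode(&v); err != nil {
+//		// handle error
+//	}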
+
+func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{b: ext})
+}
+
+func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver {
+ return &msgpackEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver {
+ return &msgpackDecDriver{d: d, r: d.r, h: h, br: d.bytes}
+}
+
+func (e *msgpackEncDriver) reset() {
+ e.w = e.e.w
+}
+
+func (d *msgpackDecDriver) reset() {
+ d.r = d.d.r
+ d.bd, d.bdRead = 0, false
+}
+
+//--------------------------------------------------
+
+type msgpackSpecRpcCodec struct {
+ rpcCodec
+}
+
+// /////////////// Spec RPC Codec ///////////////////
+func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+ // WriteRequest can write to both a Go service, and other services that do
+ // not abide by the 1 argument rule of a Go service.
+ // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs
+ var bodyArr []interface{}
+ if m, ok := body.(MsgpackSpecRpcMultiArgs); ok {
+ bodyArr = ([]interface{})(m)
+ } else {
+ bodyArr = []interface{}{body}
+ }
+ r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr}
+ return c.write(r2, nil, false, true)
+}
+
+func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+ var moe interface{}
+ if r.Error != "" {
+ moe = r.Error
+ }
+ if moe != nil && body != nil {
+ body = nil
+ }
+ r2 := []interface{}{1, uint32(r.Seq), moe, body}
+ return c.write(r2, nil, false, true)
+}
+
+func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+ return c.parseCustomHeader(1, &r.Seq, &r.Error)
+}
+
+func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+ return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod)
+}
+
+func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
+ if body == nil { // read and discard
+ return c.read(nil)
+ }
+ bodyArr := []interface{}{body}
+ return c.read(&bodyArr)
+}
+
+func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
+
+ if c.isClosed() {
+ return io.EOF
+ }
+
+ // We read the response header by hand
+ // so that the body can be decoded on its own from the stream at a later time.
+
+	const fia byte = 0x94 // four-item array descriptor value
+ // Not sure why the panic of EOF is swallowed above.
+ // if bs1 := c.dec.r.readn1(); bs1 != fia {
+ // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1)
+ // return
+ // }
+ var b byte
+ b, err = c.br.ReadByte()
+ if err != nil {
+ return
+ }
+ if b != fia {
+ err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b)
+ return
+ }
+
+ if err = c.read(&b); err != nil {
+ return
+ }
+ if b != expectTypeByte {
+ err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b)
+ return
+ }
+ if err = c.read(msgid); err != nil {
+ return
+ }
+ if err = c.read(methodOrError); err != nil {
+ return
+ }
+ return
+}
+
+//--------------------------------------------------
+
+// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol
+// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+type msgpackSpecRpc struct{}
+
+// MsgpackSpecRpc implements Rpc using the communication protocol defined in
+// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md .
+// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
+var MsgpackSpecRpc msgpackSpecRpc
+
+func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
+ return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
+}
+
+func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
+ return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
+}
+
+var _ decDriver = (*msgpackDecDriver)(nil)
+var _ encDriver = (*msgpackEncDriver)(nil)
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/noop.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/noop.go
new file mode 100644
index 0000000..cfee3d0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/noop.go
@@ -0,0 +1,213 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math/rand"
+ "time"
+)
+
+// NoopHandle returns a no-op handle. It basically does nothing.
+// It is only useful for benchmarking, as it gives an idea of the
+// overhead from the codec framework.
+//
+// LIBRARY USERS: *** DO NOT USE ***
+func NoopHandle(slen int) *noopHandle {
+ h := noopHandle{}
+ h.rand = rand.New(rand.NewSource(time.Now().UnixNano()))
+ h.B = make([][]byte, slen)
+ h.S = make([]string, slen)
+ for i := 0; i < len(h.S); i++ {
+ b := make([]byte, i+1)
+ for j := 0; j < len(b); j++ {
+ b[j] = 'a' + byte(i)
+ }
+ h.B[i] = b
+ h.S[i] = string(b)
+ }
+ return &h
+}
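+
+// A minimal benchmarking sketch (illustrative, not part of this file;
+// testVal is a hypothetical value to encode):
+//
+//	func BenchmarkNoopEncode(b *testing.B) {
+//		h := NoopHandle(8)
+//		var out []byte
+//		for i := 0; i < b.N; i++ {
+//			NewEncoderBytes(&out, h).Encode(testVal)
+//		}
+//	}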
+
+// noopHandle does nothing.
+// It is used to simulate the overhead of the codec framework.
+type noopHandle struct {
+ BasicHandle
+ binaryEncodingType
+ noopDrv // noopDrv is unexported here, so we can get a copy of it when needed.
+}
+
+type noopDrv struct {
+ d *Decoder
+ e *Encoder
+ i int
+ S []string
+ B [][]byte
+ mks []bool // stack. if map (true), else if array (false)
+ mk bool // top of stack. what container are we on? map or array?
+ ct valueType // last response for IsContainerType.
+ cb int // counter for ContainerType
+ rand *rand.Rand
+}
+
+func (h *noopDrv) r(v int) int { return h.rand.Intn(v) }
+func (h *noopDrv) m(v int) int { h.i++; return h.i % v }
+
+func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h }
+func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h }
+
+func (h *noopDrv) reset() {}
+func (h *noopDrv) uncacheRead() {}
+
+// --- encDriver
+
+// stack functions (for map and array)
+func (h *noopDrv) start(b bool) {
+ // println("start", len(h.mks)+1)
+ h.mks = append(h.mks, b)
+ h.mk = b
+}
+func (h *noopDrv) end() {
+ // println("end: ", len(h.mks)-1)
+ h.mks = h.mks[:len(h.mks)-1]
+ if len(h.mks) > 0 {
+ h.mk = h.mks[len(h.mks)-1]
+ } else {
+ h.mk = false
+ }
+}
+
+func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (h *noopDrv) EncodeNil() {}
+func (h *noopDrv) EncodeInt(i int64) {}
+func (h *noopDrv) EncodeUint(i uint64) {}
+func (h *noopDrv) EncodeBool(b bool) {}
+func (h *noopDrv) EncodeFloat32(f float32) {}
+func (h *noopDrv) EncodeFloat64(f float64) {}
+func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {}
+func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) }
+func (h *noopDrv) EncodeMapStart(length int) { h.start(false) }
+func (h *noopDrv) EncodeEnd() { h.end() }
+
+func (h *noopDrv) EncodeString(c charEncoding, v string) {}
+func (h *noopDrv) EncodeSymbol(v string) {}
+func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {}
+
+func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {}
+
+// ---- decDriver
+func (h *noopDrv) initReadNext() {}
+func (h *noopDrv) CheckBreak() bool { return false }
+func (h *noopDrv) IsBuiltinType(rt uintptr) bool { return false }
+func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{}) {}
+func (h *noopDrv) DecodeInt(bitsize uint8) (i int64) { return int64(h.m(15)) }
+func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.m(35)) }
+func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) }
+func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 }
+func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] }
+
+// func (h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) }
+
+func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] }
+
+func (h *noopDrv) ReadEnd() { h.end() }
+
+// toggle map/slice
+func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) }
+func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) }
+
+func (h *noopDrv) ContainerType() (vt valueType) {
+ // return h.m(2) == 0
+	// handle kStruct, which will bomb if it calls this and doesn't get back a map or array.
+ // consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2
+ // for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs)
+ // however, every 10th time it is called, we just return something else.
+ var vals = [...]valueType{valueTypeArray, valueTypeMap}
+ // ------------ TAKE ------------
+ // if h.cb%2 == 0 {
+ // if h.ct == valueTypeMap || h.ct == valueTypeArray {
+ // } else {
+ // h.ct = vals[h.m(2)]
+ // }
+ // } else if h.cb%5 == 0 {
+ // h.ct = valueType(h.m(8))
+ // } else {
+ // h.ct = vals[h.m(2)]
+ // }
+ // ------------ TAKE ------------
+ // if h.cb%16 == 0 {
+ // h.ct = valueType(h.cb % 8)
+ // } else {
+ // h.ct = vals[h.cb%2]
+ // }
+ h.ct = vals[h.cb%2]
+ h.cb++
+ return h.ct
+
+ // if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes {
+ // return h.ct
+ // }
+ // return valueTypeUnset
+ // TODO: may need to tweak this so it works.
+ // if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap {
+ // h.cb = !h.cb
+ // h.ct = vt
+ // return h.cb
+ // }
+ // // go in a loop and check it.
+ // h.ct = vt
+ // h.cb = h.m(7) == 0
+ // return h.cb
+}
+func (h *noopDrv) TryDecodeAsNil() bool {
+ if h.mk {
+ return false
+ } else {
+ return h.m(8) == 0
+ }
+}
+func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 {
+ return 0
+}
+
+func (h *noopDrv) DecodeNaked() {
+ // use h.r (random) not h.m() because h.m() could cause the same value to be given.
+ var sk int
+ if h.mk {
+ // if mapkey, do not support values of nil OR bytes, array, map or rawext
+ sk = h.r(7) + 1
+ } else {
+ sk = h.r(12)
+ }
+ n := &h.d.n
+ switch sk {
+ case 0:
+ n.v = valueTypeNil
+ case 1:
+ n.v, n.b = valueTypeBool, false
+ case 2:
+ n.v, n.b = valueTypeBool, true
+ case 3:
+ n.v, n.i = valueTypeInt, h.DecodeInt(64)
+ case 4:
+ n.v, n.u = valueTypeUint, h.DecodeUint(64)
+ case 5:
+ n.v, n.f = valueTypeFloat, h.DecodeFloat(true)
+ case 6:
+ n.v, n.f = valueTypeFloat, h.DecodeFloat(false)
+ case 7:
+ n.v, n.s = valueTypeString, h.DecodeString()
+ case 8:
+ n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))]
+ case 9:
+ n.v = valueTypeArray
+ case 10:
+ n.v = valueTypeMap
+ default:
+ n.v = valueTypeExt
+ n.u = h.DecodeUint(64)
+ n.l = h.B[h.m(len(h.B))]
+ }
+ h.ct = n.v
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.go
new file mode 100644
index 0000000..2353263
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.go
@@ -0,0 +1,3 @@
+package codec
+
+//go:generate bash prebuild.sh
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.sh b/src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.sh
new file mode 100644
index 0000000..909f4bb
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.sh
@@ -0,0 +1,199 @@
+#!/bin/bash
+
+# _needgen is a helper function to tell if we need to generate files for msgp, codecgen.
+_needgen() {
+ local a="$1"
+ zneedgen=0
+ if [[ ! -e "$a" ]]
+ then
+ zneedgen=1
+ echo 1
+ return 0
+ fi
+ for i in `ls -1 *.go.tmpl gen.go values_test.go`
+ do
+ if [[ "$a" -ot "$i" ]]
+ then
+ zneedgen=1
+ echo 1
+ return 0
+ fi
+ done
+ echo 0
+}
+
+# _build generates fast-path.go and gen-helper.go.
+#
+# It is needed because there is some dependency between the generated code
+# and the other source files. Consequently, we have to completely remove the
+# generated files and put stubs in place, before calling "go run" again
+# to recreate them.
+_build() {
+ if ! [[ "${zforce}" == "1" ||
+ "1" == $( _needgen "fast-path.generated.go" ) ||
+ "1" == $( _needgen "gen-helper.generated.go" ) ||
+ "1" == $( _needgen "gen.generated.go" ) ||
+ 1 == 0 ]]
+ then
+ return 0
+ fi
+
+ # echo "Running prebuild"
+ if [ "${zbak}" == "1" ]
+ then
+ # echo "Backing up old generated files"
+ _zts=`date '+%m%d%Y_%H%M%S'`
+ _gg=".generated.go"
+ [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak
+ [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak
+ # [ -e "safe${_gg}" ] && mv safe${_gg} safe${_gg}__${_zts}.bak
+ # [ -e "unsafe${_gg}" ] && mv unsafe${_gg} unsafe${_gg}__${_zts}.bak
+ else
+ rm -f fast-path.generated.go gen.generated.go gen-helper.generated.go \
+ *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go
+ fi
+
+ cat > gen.generated.go <<EOF
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
+
+const genDecMapTmpl = \`
+EOF
+
+ cat >> gen.generated.go < gen-dec-map.go.tmpl
+
+ cat >> gen.generated.go <<EOF
+\`
+
+const genDecListTmpl = \`
+EOF
+
+ cat >> gen.generated.go < gen-dec-array.go.tmpl
+
+ cat >> gen.generated.go <<EOF
+\`
+
+EOF
+
+ cat > gen-from-tmpl.codec.generated.go <<EOF
+package codec
+import "io"
+func GenInternalGoFile(r io.Reader, w io.Writer, safe bool) error {
+return genInternalGoFile(r, w, safe)
+}
+EOF
+
+ cat > gen-from-tmpl.generated.go <<EOF
+//+build ignore
+
+package main
+
+//import "flag"
+import "ugorji.net/codec"
+import "os"
+
+func run(fnameIn, fnameOut string, safe bool) {
+fin, err := os.Open(fnameIn)
+if err != nil { panic(err) }
+defer fin.Close()
+fout, err := os.Create(fnameOut)
+if err != nil { panic(err) }
+defer fout.Close()
+err = codec.GenInternalGoFile(fin, fout, safe)
+if err != nil { panic(err) }
+}
+
+func main() {
+// do not make safe/unsafe variants.
+// Instead, depend on escape analysis, and place string creation and usage appropriately.
+// run("unsafe.go.tmpl", "safe.generated.go", true)
+// run("unsafe.go.tmpl", "unsafe.generated.go", false)
+run("fast-path.go.tmpl", "fast-path.generated.go", false)
+run("gen-helper.go.tmpl", "gen-helper.generated.go", false)
+}
+
+EOF
+ go run -tags=notfastpath gen-from-tmpl.generated.go && \
+ rm -f gen-from-tmpl.*generated.go
+}
+
+_codegenerators() {
+ if [[ $zforce == "1" ||
+ "1" == $( _needgen "values_codecgen${zsfx}" ) ||
+ "1" == $( _needgen "values_msgp${zsfx}" ) ||
+ "1" == $( _needgen "values_ffjson${zsfx}" ) ||
+ 1 == 0 ]]
+ then
+ # codecgen creates some temporary files in the directory (main, pkg).
+ # Consequently, we should start msgp and ffjson first, and also put a small time latency before
+ # starting codecgen.
+ # Without this, ffjson chokes on one of the temporary files from codecgen.
+ if [[ $zexternal == "1" ]]
+ then
+ echo "ffjson ... " && \
+ ffjson -w values_ffjson${zsfx} $zfin &
+ zzzIdFF=$!
+ echo "msgp ... " && \
+ msgp -tests=false -o=values_msgp${zsfx} -file=$zfin &
+ zzzIdMsgp=$!
+
+ sleep 1 # give ffjson and msgp some buffer time. see note above.
+ fi
+
+ echo "codecgen - !unsafe ... " && \
+ codecgen -rt codecgen -t 'x,codecgen,!unsafe' -o values_codecgen${zsfx} -d 19780 $zfin &
+ zzzIdC=$!
+ echo "codecgen - unsafe ... " && \
+ codecgen -u -rt codecgen -t 'x,codecgen,unsafe' -o values_codecgen_unsafe${zsfx} -d 19781 $zfin &
+ zzzIdCU=$!
+ wait $zzzIdC $zzzIdCU $zzzIdMsgp $zzzIdFF && \
+ # remove (M|Unm)arshalJSON implementations, so they don't conflict with encoding/json bench \
+ if [[ $zexternal == "1" ]]
+ then
+ sed -i 's+ MarshalJSON(+ _MarshalJSON(+g' values_ffjson${zsfx} && \
+ sed -i 's+ UnmarshalJSON(+ _UnmarshalJSON(+g' values_ffjson${zsfx}
+ fi && \
+ echo "generators done!" && \
+ true
+ fi
+}
+
+# _init reads the arguments and sets up the flags
+_init() {
+OPTIND=1
+while getopts "fbx" flag
+do
+ case "x$flag" in
+ 'xf') zforce=1;;
+ 'xb') zbak=1;;
+ 'xx') zexternal=1;;
+ *) echo "prebuild.sh accepts [-fbx] only"; return 1;;
+ esac
+done
+shift $((OPTIND-1))
+OPTIND=1
+}
+
+# main script.
+# First ensure that this is being run from the basedir (i.e. dirname of script is .)
+if [ "." = `dirname $0` ]
+then
+ zmydir=`pwd`
+ zfin="test_values.generated.go"
+ zsfx="_generated_test.go"
+ # rm -f *_generated_test.go
+ rm -f codecgen-*.go && \
+ _init "$@" && \
+ _build && \
+ cp $zmydir/values_test.go $zmydir/$zfin && \
+ _codegenerators && \
+ echo prebuild done successfully
+ rm -f $zmydir/$zfin
+else
+ echo "Script must be run from the directory it resides in"
+fi
+
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/rpc.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/rpc.go
new file mode 100644
index 0000000..dad53d0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/rpc.go
@@ -0,0 +1,180 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "bufio"
+ "io"
+ "net/rpc"
+ "sync"
+)
+
+// rpcEncodeTerminator allows a handle to specify a []byte terminator to send after each Encode.
+//
+// Some codecs like json need to put a space after each encoded value, to serve as a
+// delimiter for things like numbers (else json codec will continue reading till EOF).
+type rpcEncodeTerminator interface {
+ rpcEncodeTerminate() []byte
+}
+
+// Rpc provides a rpc Server or Client Codec for rpc communication.
+type Rpc interface {
+ ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
+ ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
+}
+
+// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
+// used by the rpc connection. It accommodates use-cases where the connection
+// should be used by rpc and non-rpc functions, e.g. streaming a file after
+// sending an rpc response.
+type RpcCodecBuffered interface {
+ BufferedReader() *bufio.Reader
+ BufferedWriter() *bufio.Writer
+}
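+
+// A minimal usage sketch (illustrative, not part of this file; conn, handle
+// and payload are hypothetical):
+//
+//	sc := GoRpc.ServerCodec(conn, handle)
+//	if bc, ok := sc.(RpcCodecBuffered); ok {
+//		// after the rpc exchange, stream non-rpc data over the same connection
+//		bc.BufferedWriter().Write(payload)
+//		bc.BufferedWriter().Flush()
+//	}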
+
+// -------------------------------------
+
+// rpcCodec defines the struct members and common methods.
+type rpcCodec struct {
+ rwc io.ReadWriteCloser
+ dec *Decoder
+ enc *Encoder
+ bw *bufio.Writer
+ br *bufio.Reader
+ mu sync.Mutex
+ h Handle
+
+ cls bool
+ clsmu sync.RWMutex
+}
+
+func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
+ bw := bufio.NewWriter(conn)
+ br := bufio.NewReader(conn)
+ return rpcCodec{
+ rwc: conn,
+ bw: bw,
+ br: br,
+ enc: NewEncoder(bw, h),
+ dec: NewDecoder(br, h),
+ h: h,
+ }
+}
+
+func (c *rpcCodec) BufferedReader() *bufio.Reader {
+ return c.br
+}
+
+func (c *rpcCodec) BufferedWriter() *bufio.Writer {
+ return c.bw
+}
+
+func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) {
+ if c.isClosed() {
+ return io.EOF
+ }
+ if err = c.enc.Encode(obj1); err != nil {
+ return
+ }
+ t, tOk := c.h.(rpcEncodeTerminator)
+ if tOk {
+ c.bw.Write(t.rpcEncodeTerminate())
+ }
+ if writeObj2 {
+ if err = c.enc.Encode(obj2); err != nil {
+ return
+ }
+ if tOk {
+ c.bw.Write(t.rpcEncodeTerminate())
+ }
+ }
+ if doFlush {
+ return c.bw.Flush()
+ }
+ return
+}
+
+func (c *rpcCodec) read(obj interface{}) (err error) {
+ if c.isClosed() {
+ return io.EOF
+ }
+	// If nil is passed in, we still attempt to read the content and discard it.
+ if obj == nil {
+ var obj2 interface{}
+ return c.dec.Decode(&obj2)
+ }
+ return c.dec.Decode(obj)
+}
+
+func (c *rpcCodec) isClosed() bool {
+ c.clsmu.RLock()
+ x := c.cls
+ c.clsmu.RUnlock()
+ return x
+}
+
+func (c *rpcCodec) Close() error {
+ if c.isClosed() {
+ return io.EOF
+ }
+ c.clsmu.Lock()
+ c.cls = true
+ c.clsmu.Unlock()
+ return c.rwc.Close()
+}
+
+func (c *rpcCodec) ReadResponseBody(body interface{}) error {
+ return c.read(body)
+}
+
+// -------------------------------------
+
+type goRpcCodec struct {
+ rpcCodec
+}
+
+func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+ // Must protect for concurrent access as per API
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.write(r, body, true, true)
+}
+
+func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.write(r, body, true, true)
+}
+
+func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+ return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+ return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
+ return c.read(body)
+}
+
+// -------------------------------------
+
+// goRpc is the implementation of Rpc that uses the communication protocol
+// as defined in net/rpc package.
+type goRpc struct{}
+
+// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
+// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
+var GoRpc goRpc
+
+func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
+ return &goRpcCodec{newRPCCodec(conn, h)}
+}
+
+func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
+ return &goRpcCodec{newRPCCodec(conn, h)}
+}
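+
+// A minimal wiring sketch (illustrative, not part of this file; assumes a
+// service already registered with net/rpc and accepted net.Conn values
+// srvConn and cliConn):
+//
+//	var mh MsgpackHandle // any Handle works here, e.g. JsonHandle
+//	go rpc.ServeCodec(GoRpc.ServerCodec(srvConn, &mh))
+//
+//	client := rpc.NewClientWithCodec(GoRpc.ClientCodec(cliConn, &mh))
+//	defer client.Close()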
+
+var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/simple.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/simple.go
new file mode 100644
index 0000000..7c0ba7a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/simple.go
@@ -0,0 +1,519 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+)
+
+const (
+ _ uint8 = iota
+ simpleVdNil = 1
+ simpleVdFalse = 2
+ simpleVdTrue = 3
+ simpleVdFloat32 = 4
+ simpleVdFloat64 = 5
+
+ // each lasts for 4 (ie n, n+1, n+2, n+3)
+ simpleVdPosInt = 8
+ simpleVdNegInt = 12
+
+ // containers: each lasts for 4 (ie n, n+1, n+2, ... n+7)
+ simpleVdString = 216
+ simpleVdByteArray = 224
+ simpleVdArray = 232
+ simpleVdMap = 240
+ simpleVdExt = 248
+)
+
+type simpleEncDriver struct {
+ noBuiltInTypes
+ encNoSeparator
+ e *Encoder
+ h *SimpleHandle
+ w encWriter
+ b [8]byte
+}
+
+func (e *simpleEncDriver) EncodeNil() {
+ e.w.writen1(simpleVdNil)
+}
+
+func (e *simpleEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(simpleVdTrue)
+ } else {
+ e.w.writen1(simpleVdFalse)
+ }
+}
+
+func (e *simpleEncDriver) EncodeFloat32(f float32) {
+ e.w.writen1(simpleVdFloat32)
+ bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *simpleEncDriver) EncodeFloat64(f float64) {
+ e.w.writen1(simpleVdFloat64)
+ bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
+func (e *simpleEncDriver) EncodeInt(v int64) {
+ if v < 0 {
+ e.encUint(uint64(-v), simpleVdNegInt)
+ } else {
+ e.encUint(uint64(v), simpleVdPosInt)
+ }
+}
+
+func (e *simpleEncDriver) EncodeUint(v uint64) {
+ e.encUint(v, simpleVdPosInt)
+}
+
+func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
+ if v <= math.MaxUint8 {
+ e.w.writen2(bd, uint8(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd + 1)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.w.writen1(bd + 2)
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
+ } else { // if v <= math.MaxUint64 {
+ e.w.writen1(bd + 3)
+ bigenHelper{e.b[:8], e.w}.writeUint64(v)
+ }
+}
+
+func (e *simpleEncDriver) encLen(bd byte, length int) {
+ if length == 0 {
+ e.w.writen1(bd)
+ } else if length <= math.MaxUint8 {
+ e.w.writen1(bd + 1)
+ e.w.writen1(uint8(length))
+ } else if length <= math.MaxUint16 {
+ e.w.writen1(bd + 2)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length))
+ } else if int64(length) <= math.MaxUint32 {
+ e.w.writen1(bd + 3)
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length))
+ } else {
+ e.w.writen1(bd + 4)
+ bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length))
+ }
+}
+
+func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
+ bs := ext.WriteExt(rv)
+ if bs == nil {
+ e.EncodeNil()
+ return
+ }
+ e.encodeExtPreamble(uint8(xtag), len(bs))
+ e.w.writeb(bs)
+}
+
+func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+ e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+ e.w.writeb(re.Data)
+}
+
+func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
+ e.encLen(simpleVdExt, length)
+ e.w.writen1(xtag)
+}
+
+func (e *simpleEncDriver) EncodeArrayStart(length int) {
+ e.encLen(simpleVdArray, length)
+}
+
+func (e *simpleEncDriver) EncodeMapStart(length int) {
+ e.encLen(simpleVdMap, length)
+}
+
+func (e *simpleEncDriver) EncodeString(c charEncoding, v string) {
+ e.encLen(simpleVdString, len(v))
+ e.w.writestr(v)
+}
+
+func (e *simpleEncDriver) EncodeSymbol(v string) {
+ e.EncodeString(c_UTF8, v)
+}
+
+func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ e.encLen(simpleVdByteArray, len(v))
+ e.w.writeb(v)
+}
+
+//------------------------------------
+
+type simpleDecDriver struct {
+ d *Decoder
+ h *SimpleHandle
+ r decReader
+ bdRead bool
+ bd byte
+ br bool // bytes reader
+ noBuiltInTypes
+ noStreamingCodec
+ decNoSeparator
+ b [scratchByteArrayLen]byte
+}
+
+func (d *simpleDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.bdRead = true
+}
+
+func (d *simpleDecDriver) ContainerType() (vt valueType) {
+ if d.bd == simpleVdNil {
+ return valueTypeNil
+ } else if d.bd == simpleVdByteArray || d.bd == simpleVdByteArray+1 ||
+ d.bd == simpleVdByteArray+2 || d.bd == simpleVdByteArray+3 || d.bd == simpleVdByteArray+4 {
+ return valueTypeBytes
+ } else if d.bd == simpleVdString || d.bd == simpleVdString+1 ||
+ d.bd == simpleVdString+2 || d.bd == simpleVdString+3 || d.bd == simpleVdString+4 {
+ return valueTypeString
+ } else if d.bd == simpleVdArray || d.bd == simpleVdArray+1 ||
+ d.bd == simpleVdArray+2 || d.bd == simpleVdArray+3 || d.bd == simpleVdArray+4 {
+ return valueTypeArray
+ } else if d.bd == simpleVdMap || d.bd == simpleVdMap+1 ||
+ d.bd == simpleVdMap+2 || d.bd == simpleVdMap+3 || d.bd == simpleVdMap+4 {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ }
+ return valueTypeUnset
+}
+
+func (d *simpleDecDriver) TryDecodeAsNil() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdNil {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case simpleVdPosInt:
+ ui = uint64(d.r.readn1())
+ case simpleVdPosInt + 1:
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ case simpleVdPosInt + 2:
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ case simpleVdPosInt + 3:
+ ui = uint64(bigen.Uint64(d.r.readx(8)))
+ case simpleVdNegInt:
+ ui = uint64(d.r.readn1())
+ neg = true
+ case simpleVdNegInt + 1:
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ neg = true
+ case simpleVdNegInt + 2:
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ neg = true
+ case simpleVdNegInt + 3:
+ ui = uint64(bigen.Uint64(d.r.readx(8)))
+ neg = true
+ default:
+ d.d.errorf("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
+ return
+ }
+ // don't do this check, because callers may only want the unsigned value.
+ // if ui > math.MaxInt64 {
+ // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
+ // return
+ // }
+ return
+}
+
+func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ ui, neg := d.decCheckInteger()
+ i, overflow := chkOvf.SignedInt(ui)
+ if overflow {
+ d.d.errorf("simple: overflow converting %v to signed integer", ui)
+ return
+ }
+ if neg {
+ i = -i
+ }
+ if chkOvf.Int(i, bitsize) {
+ d.d.errorf("simple: overflow integer: %v", i)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ ui, neg := d.decCheckInteger()
+ if neg {
+ d.d.errorf("Assigning negative signed value to unsigned type")
+ return
+ }
+ if chkOvf.Uint(ui, bitsize) {
+ d.d.errorf("simple: overflow integer: %v", ui)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdFloat32 {
+ f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ } else if d.bd == simpleVdFloat64 {
+ f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+ } else {
+ if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 {
+ f = float64(d.DecodeInt(64))
+ } else {
+ d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd)
+ return
+ }
+ }
+ if chkOverflow32 && chkOvf.Float32(f) {
+ d.d.errorf("simple: float32 overflow: %v", f)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *simpleDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdTrue {
+ b = true
+ } else if d.bd == simpleVdFalse {
+ } else {
+ d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) ReadMapStart() (length int) {
+ d.bdRead = false
+ return d.decLen()
+}
+
+func (d *simpleDecDriver) ReadArrayStart() (length int) {
+ d.bdRead = false
+ return d.decLen()
+}
+
+func (d *simpleDecDriver) decLen() int {
+ switch d.bd % 8 {
+ case 0:
+ return 0
+ case 1:
+ return int(d.r.readn1())
+ case 2:
+ return int(bigen.Uint16(d.r.readx(2)))
+ case 3:
+ ui := uint64(bigen.Uint32(d.r.readx(4)))
+ if chkOvf.Uint(ui, intBitsize) {
+ d.d.errorf("simple: overflow integer: %v", ui)
+ return 0
+ }
+ return int(ui)
+ case 4:
+ ui := bigen.Uint64(d.r.readx(8))
+ if chkOvf.Uint(ui, intBitsize) {
+ d.d.errorf("simple: overflow integer: %v", ui)
+ return 0
+ }
+ return int(ui)
+ }
+ d.d.errorf("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8)
+ return -1
+}
+
+func (d *simpleDecDriver) DecodeString() (s string) {
+ return string(d.DecodeBytes(d.b[:], true, true))
+}
+
+func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdNil {
+ d.bdRead = false
+ return
+ }
+ clen := d.decLen()
+ d.bdRead = false
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, bs)
+}
+
+func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if xtag > 0xff {
+ d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
+ return
+ }
+ realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+ realxtag = uint64(realxtag1)
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+ } else {
+ ext.ReadExt(rv, xbs)
+ }
+ return
+}
+
+func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
+ l := d.decLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+ return
+ }
+ xbs = d.r.readx(l)
+ case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+ xbs = d.DecodeBytes(nil, false, true)
+ default:
+ d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). Got: 0x%x", d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+
+ n := &d.d.n
+ var decodeFurther bool
+
+ switch d.bd {
+ case simpleVdNil:
+ n.v = valueTypeNil
+ case simpleVdFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case simpleVdTrue:
+ n.v = valueTypeBool
+ n.b = true
+ case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
+ if d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = d.DecodeInt(64)
+ } else {
+ n.v = valueTypeUint
+ n.u = d.DecodeUint(64)
+ }
+ case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
+ n.v = valueTypeInt
+ n.i = d.DecodeInt(64)
+ case simpleVdFloat32:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat(true)
+ case simpleVdFloat64:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat(false)
+ case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
+ n.v = valueTypeExt
+ l := d.decLen()
+ n.u = uint64(d.r.readn1())
+ n.l = d.r.readx(l)
+ case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
+ n.v = valueTypeMap
+ decodeFurther = true
+ default:
+ d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
+ }
+
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ return
+}
+
+//------------------------------------
+
+// SimpleHandle is a Handle for a very simple encoding format.
+//
+// simple is a simplistic codec similar to binc, but not as compact.
+// - Encoding of a value is always preceded by the descriptor byte (bd)
+// - True, false, nil are encoded fully in 1 byte (the descriptor)
+// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
+// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
+// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
+// - Lengths of containers (strings, bytes, array, map, extensions)
+// are encoded in 0, 1, 2, 4 or 8 bytes.
+// Zero-length containers have no length encoded.
+// For others, the number of length bytes is given by pow(2, (bd%8)-1), ie 1, 2, 4 or 8.
+// - maps are encoded as [bd] [length] [[key][value]]...
+// - arrays are encoded as [bd] [length] [value]...
+// - extensions are encoded as [bd] [length] [tag] [byte]...
+// - strings/bytearrays are encoded as [bd] [length] [byte]...
+//
+// The full spec will be published soon.
+type SimpleHandle struct {
+ BasicHandle
+ binaryEncodingType
+}
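+
+// An illustrative usage sketch (not part of the original file), using the
+// package's NewEncoderBytes/NewDecoderBytes constructors:
+//
+//	var h SimpleHandle
+//	var buf []byte
+//	if err := NewEncoderBytes(&buf, &h).Encode(map[string]int{"a": 1}); err != nil {
+//		// handle encode error
+//	}
+//	var out map[string]int
+//	if err := NewDecoderBytes(buf, &h).Decode(&out); err != nil {
+//		// handle decode error
+//	}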
+
+func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{b: ext})
+}
+
+func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver {
+ return &simpleEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver {
+ return &simpleDecDriver{d: d, r: d.r, h: h, br: d.bytes}
+}
+
+func (e *simpleEncDriver) reset() {
+ e.w = e.e.w
+}
+
+func (d *simpleDecDriver) reset() {
+ d.r = d.d.r
+ d.bd, d.bdRead = 0, false
+}
+
+var _ decDriver = (*simpleDecDriver)(nil)
+var _ encDriver = (*simpleEncDriver)(nil)
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json b/src/kube2msb/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json
new file mode 100644
index 0000000..9028586
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json
@@ -0,0 +1,639 @@
+[
+ {
+ "cbor": "AA==",
+ "hex": "00",
+ "roundtrip": true,
+ "decoded": 0
+ },
+ {
+ "cbor": "AQ==",
+ "hex": "01",
+ "roundtrip": true,
+ "decoded": 1
+ },
+ {
+ "cbor": "Cg==",
+ "hex": "0a",
+ "roundtrip": true,
+ "decoded": 10
+ },
+ {
+ "cbor": "Fw==",
+ "hex": "17",
+ "roundtrip": true,
+ "decoded": 23
+ },
+ {
+ "cbor": "GBg=",
+ "hex": "1818",
+ "roundtrip": true,
+ "decoded": 24
+ },
+ {
+ "cbor": "GBk=",
+ "hex": "1819",
+ "roundtrip": true,
+ "decoded": 25
+ },
+ {
+ "cbor": "GGQ=",
+ "hex": "1864",
+ "roundtrip": true,
+ "decoded": 100
+ },
+ {
+ "cbor": "GQPo",
+ "hex": "1903e8",
+ "roundtrip": true,
+ "decoded": 1000
+ },
+ {
+ "cbor": "GgAPQkA=",
+ "hex": "1a000f4240",
+ "roundtrip": true,
+ "decoded": 1000000
+ },
+ {
+ "cbor": "GwAAAOjUpRAA",
+ "hex": "1b000000e8d4a51000",
+ "roundtrip": true,
+ "decoded": 1000000000000
+ },
+ {
+ "cbor": "G///////////",
+ "hex": "1bffffffffffffffff",
+ "roundtrip": true,
+ "decoded": 18446744073709551615
+ },
+ {
+ "cbor": "wkkBAAAAAAAAAAA=",
+ "hex": "c249010000000000000000",
+ "roundtrip": true,
+ "decoded": 18446744073709551616
+ },
+ {
+ "cbor": "O///////////",
+ "hex": "3bffffffffffffffff",
+ "roundtrip": true,
+ "decoded": -18446744073709551616,
+ "skip": true
+ },
+ {
+ "cbor": "w0kBAAAAAAAAAAA=",
+ "hex": "c349010000000000000000",
+ "roundtrip": true,
+ "decoded": -18446744073709551617
+ },
+ {
+ "cbor": "IA==",
+ "hex": "20",
+ "roundtrip": true,
+ "decoded": -1
+ },
+ {
+ "cbor": "KQ==",
+ "hex": "29",
+ "roundtrip": true,
+ "decoded": -10
+ },
+ {
+ "cbor": "OGM=",
+ "hex": "3863",
+ "roundtrip": true,
+ "decoded": -100
+ },
+ {
+ "cbor": "OQPn",
+ "hex": "3903e7",
+ "roundtrip": true,
+ "decoded": -1000
+ },
+ {
+ "cbor": "+QAA",
+ "hex": "f90000",
+ "roundtrip": true,
+ "decoded": 0.0
+ },
+ {
+ "cbor": "+YAA",
+ "hex": "f98000",
+ "roundtrip": true,
+ "decoded": -0.0
+ },
+ {
+ "cbor": "+TwA",
+ "hex": "f93c00",
+ "roundtrip": true,
+ "decoded": 1.0
+ },
+ {
+ "cbor": "+z/xmZmZmZma",
+ "hex": "fb3ff199999999999a",
+ "roundtrip": true,
+ "decoded": 1.1
+ },
+ {
+ "cbor": "+T4A",
+ "hex": "f93e00",
+ "roundtrip": true,
+ "decoded": 1.5
+ },
+ {
+ "cbor": "+Xv/",
+ "hex": "f97bff",
+ "roundtrip": true,
+ "decoded": 65504.0
+ },
+ {
+ "cbor": "+kfDUAA=",
+ "hex": "fa47c35000",
+ "roundtrip": true,
+ "decoded": 100000.0
+ },
+ {
+ "cbor": "+n9///8=",
+ "hex": "fa7f7fffff",
+ "roundtrip": true,
+ "decoded": 3.4028234663852886e+38
+ },
+ {
+ "cbor": "+3435DyIAHWc",
+ "hex": "fb7e37e43c8800759c",
+ "roundtrip": true,
+ "decoded": 1.0e+300
+ },
+ {
+ "cbor": "+QAB",
+ "hex": "f90001",
+ "roundtrip": true,
+ "decoded": 5.960464477539063e-08
+ },
+ {
+ "cbor": "+QQA",
+ "hex": "f90400",
+ "roundtrip": true,
+ "decoded": 6.103515625e-05
+ },
+ {
+ "cbor": "+cQA",
+ "hex": "f9c400",
+ "roundtrip": true,
+ "decoded": -4.0
+ },
+ {
+ "cbor": "+8AQZmZmZmZm",
+ "hex": "fbc010666666666666",
+ "roundtrip": true,
+ "decoded": -4.1
+ },
+ {
+ "cbor": "+XwA",
+ "hex": "f97c00",
+ "roundtrip": true,
+ "diagnostic": "Infinity"
+ },
+ {
+ "cbor": "+X4A",
+ "hex": "f97e00",
+ "roundtrip": true,
+ "diagnostic": "NaN"
+ },
+ {
+ "cbor": "+fwA",
+ "hex": "f9fc00",
+ "roundtrip": true,
+ "diagnostic": "-Infinity"
+ },
+ {
+ "cbor": "+n+AAAA=",
+ "hex": "fa7f800000",
+ "roundtrip": false,
+ "diagnostic": "Infinity"
+ },
+ {
+ "cbor": "+n/AAAA=",
+ "hex": "fa7fc00000",
+ "roundtrip": false,
+ "diagnostic": "NaN"
+ },
+ {
+ "cbor": "+v+AAAA=",
+ "hex": "faff800000",
+ "roundtrip": false,
+ "diagnostic": "-Infinity"
+ },
+ {
+ "cbor": "+3/wAAAAAAAA",
+ "hex": "fb7ff0000000000000",
+ "roundtrip": false,
+ "diagnostic": "Infinity"
+ },
+ {
+ "cbor": "+3/4AAAAAAAA",
+ "hex": "fb7ff8000000000000",
+ "roundtrip": false,
+ "diagnostic": "NaN"
+ },
+ {
+ "cbor": "+//wAAAAAAAA",
+ "hex": "fbfff0000000000000",
+ "roundtrip": false,
+ "diagnostic": "-Infinity"
+ },
+ {
+ "cbor": "9A==",
+ "hex": "f4",
+ "roundtrip": true,
+ "decoded": false
+ },
+ {
+ "cbor": "9Q==",
+ "hex": "f5",
+ "roundtrip": true,
+ "decoded": true
+ },
+ {
+ "cbor": "9g==",
+ "hex": "f6",
+ "roundtrip": true,
+ "decoded": null
+ },
+ {
+ "cbor": "9w==",
+ "hex": "f7",
+ "roundtrip": true,
+ "diagnostic": "undefined"
+ },
+ {
+ "cbor": "8A==",
+ "hex": "f0",
+ "roundtrip": true,
+ "diagnostic": "simple(16)"
+ },
+ {
+ "cbor": "+Bg=",
+ "hex": "f818",
+ "roundtrip": true,
+ "diagnostic": "simple(24)"
+ },
+ {
+ "cbor": "+P8=",
+ "hex": "f8ff",
+ "roundtrip": true,
+ "diagnostic": "simple(255)"
+ },
+ {
+ "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==",
+ "hex": "c074323031332d30332d32315432303a30343a30305a",
+ "roundtrip": true,
+ "diagnostic": "0(\"2013-03-21T20:04:00Z\")"
+ },
+ {
+ "cbor": "wRpRS2ew",
+ "hex": "c11a514b67b0",
+ "roundtrip": true,
+ "diagnostic": "1(1363896240)"
+ },
+ {
+ "cbor": "wftB1FLZ7CAAAA==",
+ "hex": "c1fb41d452d9ec200000",
+ "roundtrip": true,
+ "diagnostic": "1(1363896240.5)"
+ },
+ {
+ "cbor": "10QBAgME",
+ "hex": "d74401020304",
+ "roundtrip": true,
+ "diagnostic": "23(h'01020304')"
+ },
+ {
+ "cbor": "2BhFZElFVEY=",
+ "hex": "d818456449455446",
+ "roundtrip": true,
+ "diagnostic": "24(h'6449455446')"
+ },
+ {
+ "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==",
+ "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d",
+ "roundtrip": true,
+ "diagnostic": "32(\"http://www.example.com\")"
+ },
+ {
+ "cbor": "QA==",
+ "hex": "40",
+ "roundtrip": true,
+ "diagnostic": "h''"
+ },
+ {
+ "cbor": "RAECAwQ=",
+ "hex": "4401020304",
+ "roundtrip": true,
+ "diagnostic": "h'01020304'"
+ },
+ {
+ "cbor": "YA==",
+ "hex": "60",
+ "roundtrip": true,
+ "decoded": ""
+ },
+ {
+ "cbor": "YWE=",
+ "hex": "6161",
+ "roundtrip": true,
+ "decoded": "a"
+ },
+ {
+ "cbor": "ZElFVEY=",
+ "hex": "6449455446",
+ "roundtrip": true,
+ "decoded": "IETF"
+ },
+ {
+ "cbor": "YiJc",
+ "hex": "62225c",
+ "roundtrip": true,
+ "decoded": "\"\\"
+ },
+ {
+ "cbor": "YsO8",
+ "hex": "62c3bc",
+ "roundtrip": true,
+ "decoded": "ü"
+ },
+ {
+ "cbor": "Y+awtA==",
+ "hex": "63e6b0b4",
+ "roundtrip": true,
+ "decoded": "水"
+ },
+ {
+ "cbor": "ZPCQhZE=",
+ "hex": "64f0908591",
+ "roundtrip": true,
+ "decoded": "𐅑"
+ },
+ {
+ "cbor": "gA==",
+ "hex": "80",
+ "roundtrip": true,
+ "decoded": [
+
+ ]
+ },
+ {
+ "cbor": "gwECAw==",
+ "hex": "83010203",
+ "roundtrip": true,
+ "decoded": [
+ 1,
+ 2,
+ 3
+ ]
+ },
+ {
+ "cbor": "gwGCAgOCBAU=",
+ "hex": "8301820203820405",
+ "roundtrip": true,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=",
+ "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819",
+ "roundtrip": true,
+ "decoded": [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25
+ ]
+ },
+ {
+ "cbor": "oA==",
+ "hex": "a0",
+ "roundtrip": true,
+ "decoded": {
+ }
+ },
+ {
+ "cbor": "ogECAwQ=",
+ "hex": "a201020304",
+ "roundtrip": true,
+ "skip": true,
+ "diagnostic": "{1: 2, 3: 4}"
+ },
+ {
+ "cbor": "omFhAWFiggID",
+ "hex": "a26161016162820203",
+ "roundtrip": true,
+ "decoded": {
+ "a": 1,
+ "b": [
+ 2,
+ 3
+ ]
+ }
+ },
+ {
+ "cbor": "gmFhoWFiYWM=",
+ "hex": "826161a161626163",
+ "roundtrip": true,
+ "decoded": [
+ "a",
+ {
+ "b": "c"
+ }
+ ]
+ },
+ {
+ "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF",
+ "hex": "a56161614161626142616361436164614461656145",
+ "roundtrip": true,
+ "decoded": {
+ "a": "A",
+ "b": "B",
+ "c": "C",
+ "d": "D",
+ "e": "E"
+ }
+ },
+ {
+ "cbor": "X0IBAkMDBAX/",
+ "hex": "5f42010243030405ff",
+ "roundtrip": false,
+ "skip": true,
+ "diagnostic": "(_ h'0102', h'030405')"
+ },
+ {
+ "cbor": "f2VzdHJlYWRtaW5n/w==",
+ "hex": "7f657374726561646d696e67ff",
+ "roundtrip": false,
+ "decoded": "streaming"
+ },
+ {
+ "cbor": "n/8=",
+ "hex": "9fff",
+ "roundtrip": false,
+ "decoded": [
+
+ ]
+ },
+ {
+ "cbor": "nwGCAgOfBAX//w==",
+ "hex": "9f018202039f0405ffff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "nwGCAgOCBAX/",
+ "hex": "9f01820203820405ff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "gwGCAgOfBAX/",
+ "hex": "83018202039f0405ff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "gwGfAgP/ggQF",
+ "hex": "83019f0203ff820405",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=",
+ "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25
+ ]
+ },
+ {
+ "cbor": "v2FhAWFinwID//8=",
+ "hex": "bf61610161629f0203ffff",
+ "roundtrip": false,
+ "decoded": {
+ "a": 1,
+ "b": [
+ 2,
+ 3
+ ]
+ }
+ },
+ {
+ "cbor": "gmFhv2FiYWP/",
+ "hex": "826161bf61626163ff",
+ "roundtrip": false,
+ "decoded": [
+ "a",
+ {
+ "b": "c"
+ }
+ ]
+ },
+ {
+ "cbor": "v2NGdW71Y0FtdCH/",
+ "hex": "bf6346756ef563416d7421ff",
+ "roundtrip": false,
+ "decoded": {
+ "Fun": true,
+ "Amt": -2
+ }
+ }
+]
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/test.py b/src/kube2msb/vendor/github.com/ugorji/go/codec/test.py
new file mode 100644
index 0000000..c0ad20b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/test.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+
+# This will create golden files in a directory passed to it.
+# A test calls this internally to create the golden files,
+# so it can process them (so we don't have to check the files in).
+
+# Ensure msgpack-python and cbor are installed first, using:
+# sudo apt-get install python-dev
+# sudo apt-get install python-pip
+# pip install --user msgpack-python msgpack-rpc-python cbor
+
+# Ensure all "string" keys are unicode strings (else they are encoded as bytes)
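+#
+# Example invocations (illustrative; doMain below defines the exact argument handling):
+#   ./test.py testdata /path/to/dir
+#   ./test.py rpc-server 9090 30
+#   ./test.py rpc-client-python-service 9090
+#   ./test.py rpc-client-go-service 9090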
+
+import cbor, msgpack, msgpackrpc, sys, os, threading
+
+def get_test_data_list():
+ # get list with all primitive types, and a combo type
+ l0 = [
+ -8,
+ -1616,
+ -32323232,
+ -6464646464646464,
+ 192,
+ 1616,
+ 32323232,
+ 6464646464646464,
+ 192,
+ -3232.0,
+ -6464646464.0,
+ 3232.0,
+ 6464.0,
+ 6464646464.0,
+ False,
+ True,
+ u"null",
+ None,
+ u"someday",
+ 1328176922000002000,
+ u"",
+ -2206187877999998000,
+ u"bytestring",
+ 270,
+ u"none",
+ -2013855847999995777,
+ #-6795364578871345152,
+ ]
+ l1 = [
+ { "true": True,
+ "false": False },
+ { "true": u"True",
+ "false": False,
+ "uint16(1616)": 1616 },
+ { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
+ "int32":32323232, "bool": True,
+ "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
+ "SHORT STRING": u"1234567890" },
+ { True: "true", 138: False, "false": 200 }
+ ]
+
+ l = []
+ l.extend(l0)
+ l.append(l0)
+ l.append(1)
+ l.extend(l1)
+ return l
+
+def build_test_data(destdir):
+ l = get_test_data_list()
+ for i in range(len(l)):
+ # packer = msgpack.Packer()
+ serialized = msgpack.dumps(l[i])
+ f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
+ f.write(serialized)
+ f.close()
+ serialized = cbor.dumps(l[i])
+ f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
+ f.write(serialized)
+ f.close()
+
+def doRpcServer(port, stopTimeSec):
+ class EchoHandler(object):
+ def Echo123(self, msg1, msg2, msg3):
+ return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
+ def EchoStruct(self, msg):
+ return ("%s" % msg)
+
+ addr = msgpackrpc.Address('localhost', port)
+ server = msgpackrpc.Server(EchoHandler())
+ server.listen(addr)
+ # run thread to stop it after stopTimeSec seconds if > 0
+ if stopTimeSec > 0:
+ def myStopRpcServer():
+ server.stop()
+ t = threading.Timer(stopTimeSec, myStopRpcServer)
+ t.start()
+ server.start()
+
+def doRpcClientToPythonSvc(port):
+ address = msgpackrpc.Address('localhost', port)
+ client = msgpackrpc.Client(address, unpack_encoding='utf-8')
+ print client.call("Echo123", "A1", "B2", "C3")
+ print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
+
+def doRpcClientToGoSvc(port):
+ # print ">>>> port: ", port, " <<<<<"
+ address = msgpackrpc.Address('localhost', port)
+ client = msgpackrpc.Client(address, unpack_encoding='utf-8')
+ print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
+ print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
+
+def doMain(args):
+ if len(args) == 2 and args[0] == "testdata":
+ build_test_data(args[1])
+ elif len(args) == 3 and args[0] == "rpc-server":
+ doRpcServer(int(args[1]), int(args[2]))
+ elif len(args) == 2 and args[0] == "rpc-client-python-service":
+ doRpcClientToPythonSvc(int(args[1]))
+ elif len(args) == 2 and args[0] == "rpc-client-go-service":
+ doRpcClientToGoSvc(int(args[1]))
+ else:
+ print("Usage: test.py " +
+ "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
+
+if __name__ == "__main__":
+ doMain(sys.argv[1:])
+
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/tests.sh b/src/kube2msb/vendor/github.com/ugorji/go/codec/tests.sh
new file mode 100644
index 0000000..00857b6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/tests.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+# Run all the different permutations of all the tests.
+# This helps ensure that nothing gets broken.
+
+_run() {
+ # 1. VARIATIONS: regular (t), canonical (c), IO R/W (i),
+ # binc-nosymbols (n), struct2array (s), intern string (e),
+ # json-indent (d), circular (l)
+ # 2. MODE: reflection (r), external (x), codecgen (g), unsafe (u), notfastpath (f)
+ # 3. OPTIONS: verbose (v), reset (z), must (m),
+ #
+ # Use combinations of mode to get exactly what you want,
+ # and then pass the variations you need.
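+ #
+ # For example (illustrative): _run "-_tcinsed_ml" runs the regular variations,
+ # while _run "-gx_tcinsed_ml" adds the codecgen and x build tags; the calls at
+ # the bottom of this script show the combinations actually exercised.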
+
+ ztags=""
+ zargs=""
+ local OPTIND
+ OPTIND=1
+ while getopts "_xurtcinsvgzmefdl" flag
+ do
+ case "x$flag" in
+ 'xr') ;;
+ 'xf') ztags="$ztags notfastpath" ;;
+ 'xg') ztags="$ztags codecgen" ;;
+ 'xx') ztags="$ztags x" ;;
+ 'xu') ztags="$ztags unsafe" ;;
+ 'xv') zargs="$zargs -tv" ;;
+ 'xz') zargs="$zargs -tr" ;;
+ 'xm') zargs="$zargs -tm" ;;
+ 'xl') zargs="$zargs -tl" ;;
+ *) ;;
+ esac
+ done
+ # shift $((OPTIND-1))
+ printf '............. TAGS: %s .............\n' "$ztags"
+ # echo ">>>>>>> TAGS: $ztags"
+
+ OPTIND=1
+ while getopts "_xurtcinsvgzmefdl" flag
+ do
+ case "x$flag" in
+ 'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" $zargs ; sleep 2 ;;
+ 'xc') printf ">>>>>>> CANONICAL : "; go test "-tags=$ztags" $zargs -tc; sleep 2 ;;
+ 'xi') printf ">>>>>>> I/O : "; go test "-tags=$ztags" $zargs -ti; sleep 2 ;;
+ 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" -run=Binc $zargs -tn; sleep 2 ;;
+ 'xs') printf ">>>>>>> TO_ARRAY : "; go test "-tags=$ztags" $zargs -ts; sleep 2 ;;
+ 'xe') printf ">>>>>>> INTERN : "; go test "-tags=$ztags" $zargs -te; sleep 2 ;;
+ 'xd') printf ">>>>>>> INDENT : ";
+ go test "-tags=$ztags" -run=JsonCodecsTable -td=-1 $zargs;
+ go test "-tags=$ztags" -run=JsonCodecsTable -td=8 $zargs;
+ sleep 2 ;;
+ *) ;;
+ esac
+ done
+ shift $((OPTIND-1))
+
+ OPTIND=1
+}
+
+# echo ">>>>>>> RUNNING VARIATIONS OF TESTS"
+if [[ "x$@" = "x" ]]; then
+ # All: r, x, g, gu
+ _run "-_tcinsed_ml" # regular
+ _run "-_tcinsed_ml_z" # regular with reset
+ _run "-_tcinsed_ml_f" # regular with no fastpath (notfastpath)
+ _run "-x_tcinsed_ml" # external
+ _run "-gx_tcinsed_ml" # codecgen: requires external
+ _run "-gxu_tcinsed_ml" # codecgen + unsafe
+elif [[ "x$@" = "x-Z" ]]; then
+ # Regular
+ _run "-_tcinsed_ml" # regular
+ _run "-_tcinsed_ml_z" # regular with reset
+elif [[ "x$@" = "x-F" ]]; then
+ # regular with notfastpath
+ _run "-_tcinsed_ml_f" # regular
+ _run "-_tcinsed_ml_zf" # regular with reset
+else
+ _run "$@"
+fi
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/time.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/time.go
new file mode 100644
index 0000000..718b731
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/time.go
@@ -0,0 +1,233 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+)
+
+var (
+ timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+ timeExtEncFn = func(rv reflect.Value) (bs []byte, err error) {
+ defer panicToErr(&err)
+ bs = timeExt{}.WriteExt(rv.Interface())
+ return
+ }
+ timeExtDecFn = func(rv reflect.Value, bs []byte) (err error) {
+ defer panicToErr(&err)
+ timeExt{}.ReadExt(rv.Interface(), bs)
+ return
+ }
+)
+
+type timeExt struct{}
+
+func (x timeExt) WriteExt(v interface{}) (bs []byte) {
+ switch v2 := v.(type) {
+ case time.Time:
+ bs = encodeTime(v2)
+ case *time.Time:
+ bs = encodeTime(*v2)
+ default:
+ panic(fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2))
+ }
+ return
+}
+func (x timeExt) ReadExt(v interface{}, bs []byte) {
+ tt, err := decodeTime(bs)
+ if err != nil {
+ panic(err)
+ }
+ *(v.(*time.Time)) = tt
+}
+
+func (x timeExt) ConvertExt(v interface{}) interface{} {
+ return x.WriteExt(v)
+}
+func (x timeExt) UpdateExt(v interface{}, src interface{}) {
+ x.ReadExt(v, src.([]byte))
+}
+
+// encodeTime encodes a time.Time as a []byte, including
+// information on the instant in time and UTC offset.
+//
+// Format Description
+//
+// A timestamp is composed of 3 components:
+//
+// - secs: signed integer representing seconds since unix epoch
+// - nsecs: unsigned integer representing fractional seconds as a
+// nanosecond offset within secs, in the range 0 <= nsecs < 1e9
+// - tz: signed integer representing timezone offset in minutes east of UTC,
+// and a dst (daylight savings time) flag
+//
+// When encoding a timestamp, the first byte is the descriptor, which
+// defines which components are encoded and how many bytes are used to
+// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
+// is not encoded in the byte array explicitly*.
+//
+// The descriptor's 8 bits are of the form `A B C DDD EE`:
+// A: Is secs component encoded? 1 = true
+// B: Is nsecs component encoded? 1 = true
+// C: Is tz component encoded? 1 = true
+// DDD: Number of extra bytes for secs (range 0-7).
+// If A = 1, secs encoded in DDD+1 bytes.
+// If A = 0, secs is not encoded, and is assumed to be 0.
+// If A = 1, then we need at least 1 byte to encode secs.
+// DDD says the number of extra bytes beyond that 1.
+// E.g. if DDD=0, then secs is represented in 1 byte.
+// if DDD=2, then secs is represented in 3 bytes.
+// EE: Number of extra bytes for nsecs (range 0-3).
+// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
+//
+// Following the descriptor bytes, subsequent bytes are:
+//
+// secs component encoded in `DDD + 1` bytes (if A == 1)
+// nsecs component encoded in `EE + 1` bytes (if B == 1)
+// tz component encoded in 2 bytes (if C == 1)
+//
+// secs and nsecs components are integers encoded in a big-endian
+// two's-complement format.
+//
+// tz component is encoded as 2 bytes (16 bits), described below from the
+// most significant bit (15) to the least significant bit (0):
+//
+// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
+// Bit 15 = have_dst: set to 1 if we set the dst flag.
+// Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
+// Bits 13..0 = timezone offset in minutes. It is a signed integer in big-endian format.
+//
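+// For illustration (not part of the original comment): a timestamp whose secs
+// component needs 5 bytes, whose nsecs component needs 4 bytes, and which carries
+// a non-UTC zone uses descriptor A=1, B=1, C=1, DDD=100, EE=11, ie 0b11110011
+// (0xf3), followed by 5 secs bytes, 4 nsecs bytes and 2 tz bytes.
+//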
+func encodeTime(t time.Time) []byte {
+ //t := rv.Interface().(time.Time)
+ tsecs, tnsecs := t.Unix(), t.Nanosecond()
+ var (
+ bd byte
+ btmp [8]byte
+ bs [16]byte
+ i int = 1
+ )
+ l := t.Location()
+ if l == time.UTC {
+ l = nil
+ }
+ if tsecs != 0 {
+ bd = bd | 0x80
+ bigen.PutUint64(btmp[:], uint64(tsecs))
+ f := pruneSignExt(btmp[:], tsecs >= 0)
+ bd = bd | (byte(7-f) << 2)
+ copy(bs[i:], btmp[f:])
+ i = i + (8 - f)
+ }
+ if tnsecs != 0 {
+ bd = bd | 0x40
+ bigen.PutUint32(btmp[:4], uint32(tnsecs))
+ f := pruneSignExt(btmp[:4], true)
+ bd = bd | byte(3-f)
+ copy(bs[i:], btmp[f:4])
+ i = i + (4 - f)
+ }
+ if l != nil {
+ bd = bd | 0x20
+ // Note that Go Libs do not give access to dst flag.
+ _, zoneOffset := t.Zone()
+ //zoneName, zoneOffset := t.Zone()
+ zoneOffset /= 60
+ z := uint16(zoneOffset)
+ bigen.PutUint16(btmp[:2], z)
+ // clear dst flags
+ bs[i] = btmp[0] & 0x3f
+ bs[i+1] = btmp[1]
+ i = i + 2
+ }
+ bs[0] = bd
+ return bs[0:i]
+}
+
+// decodeTime decodes a []byte into a time.Time.
+func decodeTime(bs []byte) (tt time.Time, err error) {
+ bd := bs[0]
+ var (
+ tsec int64
+ tnsec uint32
+ tz uint16
+ i byte = 1
+ i2 byte
+ n byte
+ )
+ if bd&(1<<7) != 0 {
+ var btmp [8]byte
+ n = ((bd >> 2) & 0x7) + 1
+ i2 = i + n
+ copy(btmp[8-n:], bs[i:i2])
+ //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
+ if bs[i]&(1<<7) != 0 {
+ copy(btmp[0:8-n], bsAll0xff)
+ //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff }
+ }
+ i = i2
+ tsec = int64(bigen.Uint64(btmp[:]))
+ }
+ if bd&(1<<6) != 0 {
+ var btmp [4]byte
+ n = (bd & 0x3) + 1
+ i2 = i + n
+ copy(btmp[4-n:], bs[i:i2])
+ i = i2
+ tnsec = bigen.Uint32(btmp[:])
+ }
+ if bd&(1<<5) == 0 {
+ tt = time.Unix(tsec, int64(tnsec)).UTC()
+ return
+ }
+ // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
+ // However, we need name here, so it can be shown when time is printed.
+ // Zone name is in form: UTC-08:00.
+ // Note that Go Libs do not give access to dst flag, so we ignore dst bits
+
+ i2 = i + 2
+ tz = bigen.Uint16(bs[i:i2])
+ i = i2
+ // sign extend sign bit into top 2 MSB (which were dst bits):
+ if tz&(1<<13) == 0 { // positive
+ tz = tz & 0x3fff //clear 2 MSBs: dst bits
+ } else { // negative
+ tz = tz | 0xc000 //set 2 MSBs: dst bits
+ //tzname[3] = '-' (TODO: verify. this works here)
+ }
+ tzint := int16(tz)
+ if tzint == 0 {
+ tt = time.Unix(tsec, int64(tnsec)).UTC()
+ } else {
+ // For Go Time, do not use a descriptive timezone.
+ // It's unnecessary, and makes it harder to do a reflect.DeepEqual.
+ // The Offset already tells what the offset should be, if not on UTC and unknown zone name.
+ // var zoneName = timeLocUTCName(tzint)
+ tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
+ }
+ return
+}
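+
+// Illustrative round trip (a sketch, not part of the original source):
+//
+//	bs := encodeTime(time.Unix(1328176922, 500000000).UTC())
+//	t2, err := decodeTime(bs) // t2 equals the encoded instant; err is nil for well-formed input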
+
+func timeLocUTCName(tzint int16) string {
+ if tzint == 0 {
+ return "UTC"
+ }
+ var tzname = []byte("UTC+00:00")
+ //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
+ //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
+ var tzhr, tzmin int16
+ if tzint < 0 {
+ tzname[3] = '-' // (TODO: verify. this works here)
+ tzhr, tzmin = -tzint/60, (-tzint)%60
+ } else {
+ tzhr, tzmin = tzint/60, tzint%60
+ }
+ tzname[4] = timeDigits[tzhr/10]
+ tzname[5] = timeDigits[tzhr%10]
+ tzname[7] = timeDigits[tzmin/10]
+ tzname[8] = timeDigits[tzmin%10]
+ return string(tzname)
+ //return time.FixedZone(string(tzname), int(tzint)*60)
+}